1 //
   2 // Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
  31 // architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
  71 //   r27-r31 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
  74 // as regards Java usage, we don't use any callee save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
  81 reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
  82 reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
  83 reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
  84 reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
  85 reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
  86 reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
  87 reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
  88 reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
  89 reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
  90 reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
  91 reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
  92 reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
  93 reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
  94 reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
  95 reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
  96 reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
  97 reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
  98 reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
  99 reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
 100 reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
 101 reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
 102 reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
 103 reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
 104 reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
 105 reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
 106 reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
 107 reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
 108 reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
 109 reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
 110 reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
 111 reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
 112 reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
 113 reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
 114 reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
 115 reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
 116 reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
 117 reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
 118 reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
 119 reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
 120 reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
 121 reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
 122 reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
 123 reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
 124 reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
 125 reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
 126 reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
 127 reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
 128 reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
 129 reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
 130 reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
 131 reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
 132 reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
 133 reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
 134 reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
 135 reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
 136 reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
 137 reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
 138 reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
 139 reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
 140 reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
 155 // AArch64 has 32 floating-point registers. Each can store a vector of
 156 // single or double precision floating-point values up to 8 * 32
 157 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 158 // use the first float or double element of the vector.
 159 
 160 // For Java use, float registers v0-v15 are always save-on-call (even
 161 // though the platform ABI treats v8-v15 as callee save). Float registers
 162 // v16-v31 are SOC as per the platform spec.
 163 
 164   reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
 165   reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
 166   reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
 167   reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );
 168 
 169   reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
 170   reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
 171   reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
 172   reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );
 173 
 174   reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
 175   reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
 176   reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
 177   reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );
 178 
 179   reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
 180   reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
 181   reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
 182   reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );
 183 
 184   reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
 185   reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
 186   reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
 187   reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );
 188 
 189   reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
 190   reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
 191   reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
 192   reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );
 193 
 194   reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
 195   reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
 196   reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
 197   reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );
 198 
 199   reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
 200   reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
 201   reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
 202   reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );
 203 
 204   reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
 205   reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
 206   reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
 207   reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );
 208 
 209   reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
 210   reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
 211   reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
 212   reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );
 213 
 214   reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
 215   reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
 216   reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
 217   reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));
 218 
 219   reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
 220   reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
 221   reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
 222   reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));
 223 
 224   reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
 225   reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
 226   reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
 227   reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));
 228 
 229   reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
 230   reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
 231   reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
 232   reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));
 233 
 234   reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
 235   reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
 236   reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
 237   reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));
 238 
 239   reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
 240   reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
 241   reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
 242   reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));
 243 
 244   reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
 245   reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
 246   reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
 247   reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));
 248 
 249   reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
 250   reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
 251   reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
 252   reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));
 253 
 254   reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
 255   reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
 256   reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
 257   reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));
 258 
 259   reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
 260   reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
 261   reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
 262   reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));
 263 
 264   reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
 265   reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
 266   reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
 267   reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));
 268 
 269   reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
 270   reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
 271   reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
 272   reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));
 273 
 274   reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
 275   reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
 276   reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
 277   reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));
 278 
 279   reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
 280   reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
 281   reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
 282   reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));
 283 
 284   reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
 285   reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
 286   reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
 287   reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));
 288 
 289   reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
 290   reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
 291   reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
 292   reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));
 293 
 294   reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
 295   reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
 296   reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
 297   reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));
 298 
 299   reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
 300   reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
 301   reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
 302   reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));
 303 
 304   reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
 305   reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
 306   reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
 307   reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));
 308 
 309   reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
 310   reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
 311   reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
 312   reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));
 313 
 314   reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
 315   reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
 316   reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
 317   reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));
 318 
 319   reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
 320   reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
 321   reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
 322   reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
 328 // the AArch64 CPSR status flag register is not directly accessible as
 329 // an instruction operand. the FPSR status flag register is a system
 330 // register which can be written/read using MSR/MRS but again does not
 331 // appear as an operand (a code identifying the FPSR occurs as an
 332 // immediate value in the instruction).
 333 
// Flags pseudo-register: save-on-call under both conventions, ideal
// register type 0 (none), encoding 32 (one past the general registers),
// and no backing VMReg (VMRegImpl::Bad()) — as noted above, the flags
// are never addressed as an instruction operand.
 334 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
// Integer register allocation priority, highest first (see the note
// above): scratch volatiles r10-r18, then argument registers r0-r7,
// then the non-volatiles, with the non-allocatable system registers
// listed last.
 345 alloc_class chunk0(
 346     // volatiles
 347     R10, R10_H,
 348     R11, R11_H,
 349     R12, R12_H,
 350     R13, R13_H,
 351     R14, R14_H,
 352     R15, R15_H,
 353     R16, R16_H,
 354     R17, R17_H,
 355     R18, R18_H,
 356 
 357     // arg registers
 358     R0, R0_H,
 359     R1, R1_H,
 360     R2, R2_H,
 361     R3, R3_H,
 362     R4, R4_H,
 363     R5, R5_H,
 364     R6, R6_H,
 365     R7, R7_H,
 366 
 367     // non-volatiles
 368     R19, R19_H,
 369     R20, R20_H,
 370     R21, R21_H,
 371     R22, R22_H,
 372     R23, R23_H,
 373     R24, R24_H,
 374     R25, R25_H,
 375     R26, R26_H,
 376 
 377     // non-allocatable registers
 378 
 379     R27, R27_H, // heapbase
 380     R28, R28_H, // thread
 381     R29, R29_H, // fp
 382     R30, R30_H, // lr
 383     R31, R31_H, // sp
 384 );
 385 
// Float/vector register allocation priority, highest first: the
// no-save registers v16-v31, then the v0-v7 argument registers, then
// v8-v15 (callee save under the platform ABI — see note above the
// V-register definitions).
 386 alloc_class chunk1(
 387 
 388     // no save
 389     V16, V16_H, V16_J, V16_K,
 390     V17, V17_H, V17_J, V17_K,
 391     V18, V18_H, V18_J, V18_K,
 392     V19, V19_H, V19_J, V19_K,
 393     V20, V20_H, V20_J, V20_K,
 394     V21, V21_H, V21_J, V21_K,
 395     V22, V22_H, V22_J, V22_K,
 396     V23, V23_H, V23_J, V23_K,
 397     V24, V24_H, V24_J, V24_K,
 398     V25, V25_H, V25_J, V25_K,
 399     V26, V26_H, V26_J, V26_K,
 400     V27, V27_H, V27_J, V27_K,
 401     V28, V28_H, V28_J, V28_K,
 402     V29, V29_H, V29_J, V29_K,
 403     V30, V30_H, V30_J, V30_K,
 404     V31, V31_H, V31_J, V31_K,
 405 
 406     // arg registers
 407     V0, V0_H, V0_J, V0_K,
 408     V1, V1_H, V1_J, V1_K,
 409     V2, V2_H, V2_J, V2_K,
 410     V3, V3_H, V3_J, V3_K,
 411     V4, V4_H, V4_J, V4_K,
 412     V5, V5_H, V5_J, V5_K,
 413     V6, V6_H, V6_J, V6_K,
 414     V7, V7_H, V7_J, V7_K,
 415 
 416     // non-volatiles
 417     V8, V8_H, V8_J, V8_K,
 418     V9, V9_H, V9_J, V9_K,
 419     V10, V10_H, V10_J, V10_K,
 420     V11, V11_H, V11_J, V11_K,
 421     V12, V12_H, V12_J, V12_K,
 422     V13, V13_H, V13_J, V13_K,
 423     V14, V14_H, V14_J, V14_K,
 424     V15, V15_H, V15_J, V15_K,
 425 );
 426 
// The flags register lives in its own allocation chunk, separate from
// the integer (chunk0) and float/vector (chunk1) registers.
 427 alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
 432 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
 433 // 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
 434 // 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
 435 // 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
 438 // Class for all 32 bit integer registers -- excludes SP which will
 439 // never be used as an integer register
 440 reg_class any_reg32(
 441     R0,
 442     R1,
 443     R2,
 444     R3,
 445     R4,
 446     R5,
 447     R6,
 448     R7,
 449     R10,
 450     R11,
 451     R12,
 452     R13,
 453     R14,
 454     R15,
 455     R16,
 456     R17,
 457     R18,
 458     R19,
 459     R20,
 460     R21,
 461     R22,
 462     R23,
 463     R24,
 464     R25,
 465     R26,
 466     R27,
 467     R28,
 468     R29,
 469     R30
 470 );
 471 
 472 // Singleton class for R0 int register
 473 reg_class int_r0_reg(R0);
 474 
 475 // Singleton class for R2 int register
 476 reg_class int_r2_reg(R2);
 477 
 478 // Singleton class for R3 int register
 479 reg_class int_r3_reg(R3);
 480 
 481 // Singleton class for R4 int register
 482 reg_class int_r4_reg(R4);
 483 
 484 // Class for all long integer registers (including RSP)
 485 reg_class any_reg(
 486     R0, R0_H,
 487     R1, R1_H,
 488     R2, R2_H,
 489     R3, R3_H,
 490     R4, R4_H,
 491     R5, R5_H,
 492     R6, R6_H,
 493     R7, R7_H,
 494     R10, R10_H,
 495     R11, R11_H,
 496     R12, R12_H,
 497     R13, R13_H,
 498     R14, R14_H,
 499     R15, R15_H,
 500     R16, R16_H,
 501     R17, R17_H,
 502     R18, R18_H,
 503     R19, R19_H,
 504     R20, R20_H,
 505     R21, R21_H,
 506     R22, R22_H,
 507     R23, R23_H,
 508     R24, R24_H,
 509     R25, R25_H,
 510     R26, R26_H,
 511     R27, R27_H,
 512     R28, R28_H,
 513     R29, R29_H,
 514     R30, R30_H,
 515     R31, R31_H
 516 );
 517 
 518 // Class for all non-special integer registers
 519 reg_class no_special_reg32_no_fp(
 520     R0,
 521     R1,
 522     R2,
 523     R3,
 524     R4,
 525     R5,
 526     R6,
 527     R7,
 528     R10,
 529     R11,
 530     R12,                        // rmethod
 531     R13,
 532     R14,
 533     R15,
 534     R16,
 535     R17,
 536     R18,
 537     R19,
 538     R20,
 539     R21,
 540     R22,
 541     R23,
 542     R24,
 543     R25,
 544     R26
 545  /* R27, */                     // heapbase
 546  /* R28, */                     // thread
 547  /* R29, */                     // fp
 548  /* R30, */                     // lr
 549  /* R31 */                      // sp
 550 );
 551 
 552 reg_class no_special_reg32_with_fp(
 553     R0,
 554     R1,
 555     R2,
 556     R3,
 557     R4,
 558     R5,
 559     R6,
 560     R7,
 561     R10,
 562     R11,
 563     R12,                        // rmethod
 564     R13,
 565     R14,
 566     R15,
 567     R16,
 568     R17,
 569     R18,
 570     R19,
 571     R20,
 572     R21,
 573     R22,
 574     R23,
 575     R24,
 576     R25,
 577     R26
 578  /* R27, */                     // heapbase
 579  /* R28, */                     // thread
 580     R29,                        // fp
 581  /* R30, */                     // lr
 582  /* R31 */                      // sp
 583 );
 584 
// Choose between the two classes above at allocation time: when
// PreserveFramePointer is set, the _no_fp variant (R29 excluded) is
// used so the frame pointer is not clobbered; otherwise R29 is
// available as an ordinary allocatable register.
 585 reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
 587 // Class for all non-special long integer registers
 588 reg_class no_special_reg_no_fp(
 589     R0, R0_H,
 590     R1, R1_H,
 591     R2, R2_H,
 592     R3, R3_H,
 593     R4, R4_H,
 594     R5, R5_H,
 595     R6, R6_H,
 596     R7, R7_H,
 597     R10, R10_H,
 598     R11, R11_H,
 599     R12, R12_H,                 // rmethod
 600     R13, R13_H,
 601     R14, R14_H,
 602     R15, R15_H,
 603     R16, R16_H,
 604     R17, R17_H,
 605     R18, R18_H,
 606     R19, R19_H,
 607     R20, R20_H,
 608     R21, R21_H,
 609     R22, R22_H,
 610     R23, R23_H,
 611     R24, R24_H,
 612     R25, R25_H,
 613     R26, R26_H,
 614  /* R27, R27_H, */              // heapbase
 615  /* R28, R28_H, */              // thread
 616  /* R29, R29_H, */              // fp
 617  /* R30, R30_H, */              // lr
 618  /* R31, R31_H */               // sp
 619 );
 620 
 621 reg_class no_special_reg_with_fp(
 622     R0, R0_H,
 623     R1, R1_H,
 624     R2, R2_H,
 625     R3, R3_H,
 626     R4, R4_H,
 627     R5, R5_H,
 628     R6, R6_H,
 629     R7, R7_H,
 630     R10, R10_H,
 631     R11, R11_H,
 632     R12, R12_H,                 // rmethod
 633     R13, R13_H,
 634     R14, R14_H,
 635     R15, R15_H,
 636     R16, R16_H,
 637     R17, R17_H,
 638     R18, R18_H,
 639     R19, R19_H,
 640     R20, R20_H,
 641     R21, R21_H,
 642     R22, R22_H,
 643     R23, R23_H,
 644     R24, R24_H,
 645     R25, R25_H,
 646     R26, R26_H,
 647  /* R27, R27_H, */              // heapbase
 648  /* R28, R28_H, */              // thread
 649     R29, R29_H,                 // fp
 650  /* R30, R30_H, */              // lr
 651  /* R31, R31_H */               // sp
 652 );
 653 
// Long-register counterpart of no_special_reg32: selects the _no_fp
// variant (R29/R29_H excluded) when PreserveFramePointer is set,
// otherwise the _with_fp variant which allows allocation of fp.
 654 reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
 656 // Class for 64 bit register r0
 657 reg_class r0_reg(
 658     R0, R0_H
 659 );
 660 
 661 // Class for 64 bit register r1
 662 reg_class r1_reg(
 663     R1, R1_H
 664 );
 665 
 666 // Class for 64 bit register r2
 667 reg_class r2_reg(
 668     R2, R2_H
 669 );
 670 
 671 // Class for 64 bit register r3
 672 reg_class r3_reg(
 673     R3, R3_H
 674 );
 675 
 676 // Class for 64 bit register r4
 677 reg_class r4_reg(
 678     R4, R4_H
 679 );
 680 
 681 // Class for 64 bit register r5
 682 reg_class r5_reg(
 683     R5, R5_H
 684 );
 685 
 686 // Class for 64 bit register r10
 687 reg_class r10_reg(
 688     R10, R10_H
 689 );
 690 
 691 // Class for 64 bit register r11
 692 reg_class r11_reg(
 693     R11, R11_H
 694 );
 695 
 696 // Class for method register
 697 reg_class method_reg(
 698     R12, R12_H
 699 );
 700 
 701 // Class for heapbase register
 702 reg_class heapbase_reg(
 703     R27, R27_H
 704 );
 705 
 706 // Class for thread register
 707 reg_class thread_reg(
 708     R28, R28_H
 709 );
 710 
 711 // Class for frame pointer register
 712 reg_class fp_reg(
 713     R29, R29_H
 714 );
 715 
 716 // Class for link register
 717 reg_class lr_reg(
 718     R30, R30_H
 719 );
 720 
 721 // Class for long sp register
 722 reg_class sp_reg(
 723   R31, R31_H
 724 );
 725 
 726 // Class for all pointer registers
 727 reg_class ptr_reg(
 728     R0, R0_H,
 729     R1, R1_H,
 730     R2, R2_H,
 731     R3, R3_H,
 732     R4, R4_H,
 733     R5, R5_H,
 734     R6, R6_H,
 735     R7, R7_H,
 736     R10, R10_H,
 737     R11, R11_H,
 738     R12, R12_H,
 739     R13, R13_H,
 740     R14, R14_H,
 741     R15, R15_H,
 742     R16, R16_H,
 743     R17, R17_H,
 744     R18, R18_H,
 745     R19, R19_H,
 746     R20, R20_H,
 747     R21, R21_H,
 748     R22, R22_H,
 749     R23, R23_H,
 750     R24, R24_H,
 751     R25, R25_H,
 752     R26, R26_H,
 753     R27, R27_H,
 754     R28, R28_H,
 755     R29, R29_H,
 756     R30, R30_H,
 757     R31, R31_H
 758 );
 759 
 760 // Class for all non_special pointer registers
 761 reg_class no_special_ptr_reg(
 762     R0, R0_H,
 763     R1, R1_H,
 764     R2, R2_H,
 765     R3, R3_H,
 766     R4, R4_H,
 767     R5, R5_H,
 768     R6, R6_H,
 769     R7, R7_H,
 770     R10, R10_H,
 771     R11, R11_H,
 772     R12, R12_H,
 773     R13, R13_H,
 774     R14, R14_H,
 775     R15, R15_H,
 776     R16, R16_H,
 777     R17, R17_H,
 778     R18, R18_H,
 779     R19, R19_H,
 780     R20, R20_H,
 781     R21, R21_H,
 782     R22, R22_H,
 783     R23, R23_H,
 784     R24, R24_H,
 785     R25, R25_H,
 786     R26, R26_H,
 787  /* R27, R27_H, */              // heapbase
 788  /* R28, R28_H, */              // thread
 789  /* R29, R29_H, */              // fp
 790  /* R30, R30_H, */              // lr
 791  /* R31, R31_H */               // sp
 792 );
 793 
 794 // Class for all float registers
 795 reg_class float_reg(
 796     V0,
 797     V1,
 798     V2,
 799     V3,
 800     V4,
 801     V5,
 802     V6,
 803     V7,
 804     V8,
 805     V9,
 806     V10,
 807     V11,
 808     V12,
 809     V13,
 810     V14,
 811     V15,
 812     V16,
 813     V17,
 814     V18,
 815     V19,
 816     V20,
 817     V21,
 818     V22,
 819     V23,
 820     V24,
 821     V25,
 822     V26,
 823     V27,
 824     V28,
 825     V29,
 826     V30,
 827     V31
 828 );
 829 
 830 // Double precision float registers have virtual `high halves' that
 831 // are needed by the allocator.
 832 // Class for all double registers
 833 reg_class double_reg(
 834     V0, V0_H,
 835     V1, V1_H,
 836     V2, V2_H,
 837     V3, V3_H,
 838     V4, V4_H,
 839     V5, V5_H,
 840     V6, V6_H,
 841     V7, V7_H,
 842     V8, V8_H,
 843     V9, V9_H,
 844     V10, V10_H,
 845     V11, V11_H,
 846     V12, V12_H,
 847     V13, V13_H,
 848     V14, V14_H,
 849     V15, V15_H,
 850     V16, V16_H,
 851     V17, V17_H,
 852     V18, V18_H,
 853     V19, V19_H,
 854     V20, V20_H,
 855     V21, V21_H,
 856     V22, V22_H,
 857     V23, V23_H,
 858     V24, V24_H,
 859     V25, V25_H,
 860     V26, V26_H,
 861     V27, V27_H,
 862     V28, V28_H,
 863     V29, V29_H,
 864     V30, V30_H,
 865     V31, V31_H
 866 );
 867 
// Class for all 64bit vector registers
// (a 64-bit vector occupies the same two allocator slots as a double,
// hence this list is identical to double_reg above)
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
// Class for all 128bit vector registers
// (a 128-bit vector needs all four allocator slots of a SIMD/FP
// register: the base name plus the _H, _J and _K virtual halves)
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 939 
// Class for 128 bit register v0
// NOTE(review): the v0..v3 classes below list only the two 64-bit
// slots (Vn, Vn_H), unlike the four-slot entries in vectorx_reg --
// confirm this is the width intended by the users of these classes.
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);
 962 
 963 %}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
 977 // we follow the ppc-aix port in using a simple cost model which ranks
 978 // register operations as cheap, memory ops as more expensive and
 979 // branches as most expensive. the first two have a low as well as a
 980 // normal cost. huge cost appears to be a way of saying don't do
 981 // something
 982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls are ranked at twice the basic instruction cost.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile references are by far the most expensive operations here.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "gc/shared/cardTableModRefBS.hpp"
 999 
// Platform hooks consulted by Compile::shorten_branches; AArch64 (in
// this configuration) emits no call trampoline stubs, so both queries
// report zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1017 
// Platform hooks for emitting and sizing the exception and deopt
// handler stubs appended to compiled methods.
class HandlerImpl {

 public:

  // emit the handler stubs into cbuf (implemented in the source block)
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // exception handler is a single far branch to the runtime stub
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // NOTE(review): this reserves 4 instruction slots; presumably the
    // far branch may expand to up to 3 instructions on top of the adr
    // -- confirm against MacroAssembler::far_branch_size()
    return 4 * NativeInstruction::instruction_size;
  }
};
1034 
  // graph traversal helpers

  // locate the membar feeding/fed-by node n via Ctl+Mem projections,
  // or NULL if no such unique membar exists (see the source block)
  MemBarNode *parent_membar(const Node *n);
  MemBarNode *child_membar(const MemBarNode *n);
  bool leading_membar(const MemBarNode *barrier);

  bool is_card_mark_membar(const MemBarNode *barrier);

  // hop between the membars delimiting a volatile put/get subgraph
  MemBarNode *leading_to_normal(MemBarNode *leading);
  MemBarNode *normal_to_leading(const MemBarNode *barrier);
  MemBarNode *card_mark_to_trailing(const MemBarNode *barrier);
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing);
  MemBarNode *trailing_to_leading(const MemBarNode *trailing);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);
1062 %}
1063 
1064 source %{
1065 
1066   // Optimizaton of volatile gets and puts
1067   // -------------------------------------
1068   //
1069   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1070   // use to implement volatile reads and writes. For a volatile read
1071   // we simply need
1072   //
1073   //   ldar<x>
1074   //
1075   // and for a volatile write we need
1076   //
1077   //   stlr<x>
1078   // 
1079   // Alternatively, we can implement them by pairing a normal
1080   // load/store with a memory barrier. For a volatile read we need
1081   // 
1082   //   ldr<x>
1083   //   dmb ishld
1084   //
1085   // for a volatile write
1086   //
1087   //   dmb ish
1088   //   str<x>
1089   //   dmb ish
1090   //
1091   // In order to generate the desired instruction sequence we need to
1092   // be able to identify specific 'signature' ideal graph node
  // sequences which i) occur as a translation of volatile reads or
  // writes and ii) do not occur through any other translation or
  // graph transformation. We can then provide alternative adlc
1096   // matching rules which translate these node sequences to the
1097   // desired machine code sequences. Selection of the alternative
1098   // rules can be implemented by predicates which identify the
1099   // relevant node sequences.
1100   //
1101   // The ideal graph generator translates a volatile read to the node
1102   // sequence
1103   //
1104   //   LoadX[mo_acquire]
1105   //   MemBarAcquire
1106   //
1107   // As a special case when using the compressed oops optimization we
1108   // may also see this variant
1109   //
1110   //   LoadN[mo_acquire]
1111   //   DecodeN
1112   //   MemBarAcquire
1113   //
1114   // A volatile write is translated to the node sequence
1115   //
1116   //   MemBarRelease
1117   //   StoreX[mo_release] {CardMark}-optional
1118   //   MemBarVolatile
1119   //
1120   // n.b. the above node patterns are generated with a strict
1121   // 'signature' configuration of input and output dependencies (see
1122   // the predicates below for exact details). The card mark may be as
1123   // simple as a few extra nodes or, in a few GC configurations, may
1124   // include more complex control flow between the leading and
1125   // trailing memory barriers. However, whatever the card mark
1126   // configuration these signatures are unique to translated volatile
1127   // reads/stores -- they will not appear as a result of any other
1128   // bytecode translation or inlining nor as a consequence of
1129   // optimizing transforms.
1130   //
1131   // We also want to catch inlined unsafe volatile gets and puts and
1132   // be able to implement them using either ldar<x>/stlr<x> or some
1133   // combination of ldr<x>/stlr<x> and dmb instructions.
1134   //
1135   // Inlined unsafe volatiles puts manifest as a minor variant of the
1136   // normal volatile put node sequence containing an extra cpuorder
1137   // membar
1138   //
1139   //   MemBarRelease
1140   //   MemBarCPUOrder
1141   //   StoreX[mo_release] {CardMark}-optional
1142   //   MemBarVolatile
1143   //
1144   // n.b. as an aside, the cpuorder membar is not itself subject to
1145   // matching and translation by adlc rules.  However, the rule
1146   // predicates need to detect its presence in order to correctly
1147   // select the desired adlc rules.
1148   //
1149   // Inlined unsafe volatile gets manifest as a somewhat different
1150   // node sequence to a normal volatile get
1151   //
1152   //   MemBarCPUOrder
1153   //        ||       \\
1154   //   MemBarAcquire LoadX[mo_acquire]
1155   //        ||
1156   //   MemBarCPUOrder
1157   //
1158   // In this case the acquire membar does not directly depend on the
1159   // load. However, we can be sure that the load is generated from an
1160   // inlined unsafe volatile get if we see it dependent on this unique
1161   // sequence of membar nodes. Similarly, given an acquire membar we
1162   // can know that it was added because of an inlined unsafe volatile
1163   // get if it is fed and feeds a cpuorder membar and if its feed
1164   // membar also feeds an acquiring load.
1165   //
1166   // So, where we can identify these volatile read and write
1167   // signatures we can choose to plant either of the above two code
1168   // sequences. For a volatile read we can simply plant a normal
1169   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1170   // also choose to inhibit translation of the MemBarAcquire and
1171   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1172   //
1173   // When we recognise a volatile store signature we can choose to
1174   // plant at a dmb ish as a translation for the MemBarRelease, a
1175   // normal str<x> and then a dmb ish for the MemBarVolatile.
1176   // Alternatively, we can inhibit translation of the MemBarRelease
1177   // and MemBarVolatile and instead plant a simple stlr<x>
1178   // instruction.
1179   //
1180   // Of course, the above only applies when we see these signature
1181   // configurations. We still want to plant dmb instructions in any
1182   // other cases where we may see a MemBarAcquire, MemBarRelease or
1183   // MemBarVolatile. For example, at the end of a constructor which
1184   // writes final/volatile fields we will see a MemBarRelease
1185   // instruction and this needs a 'dmb ish' lest we risk the
1186   // constructed object being visible without making the
1187   // final/volatile field writes visible.
1188   //
1189   // n.b. the translation rules below which rely on detection of the
1190   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1191   // If we see anything other than the signature configurations we
1192   // always just translate the loads and stores to ldr<x> and str<x>
1193   // and translate acquire, release and volatile membars to the
1194   // relevant dmb instructions.
1195   //
1196 
1197   // graph traversal helpers used for volatile put/get optimization
1198 
1199   // 1) general purpose helpers
1200 
1201   // if node n is linked to a parent MemBarNode by an intervening
1202   // Control and Memory ProjNode return the MemBarNode otherwise return
1203   // NULL.
1204   //
1205   // n may only be a Load or a MemBar.
1206 
1207   MemBarNode *parent_membar(const Node *n)
1208   {
1209     Node *ctl = NULL;
1210     Node *mem = NULL;
1211     Node *membar = NULL;
1212 
1213     if (n->is_Load()) {
1214       ctl = n->lookup(LoadNode::Control);
1215       mem = n->lookup(LoadNode::Memory);
1216     } else if (n->is_MemBar()) {
1217       ctl = n->lookup(TypeFunc::Control);
1218       mem = n->lookup(TypeFunc::Memory);
1219     } else {
1220         return NULL;
1221     }
1222 
1223     if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj())
1224       return NULL;
1225 
1226     membar = ctl->lookup(0);
1227 
1228     if (!membar || !membar->is_MemBar())
1229       return NULL;
1230 
1231     if (mem->lookup(0) != membar)
1232       return NULL;
1233 
1234     return membar->as_MemBar();
1235   }
1236 
1237   // if n is linked to a child MemBarNode by intervening Control and
1238   // Memory ProjNodes return the MemBarNode otherwise return NULL.
1239 
1240   MemBarNode *child_membar(const MemBarNode *n)
1241   {
1242     ProjNode *ctl = n->proj_out(TypeFunc::Control);
1243     ProjNode *mem = n->proj_out(TypeFunc::Memory);
1244 
1245     // MemBar needs to have both a Ctl and Mem projection
1246     if (! ctl || ! mem)
1247       return NULL;
1248 
1249     MemBarNode *child = NULL;
1250     Node *x;
1251 
1252     for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1253       x = ctl->fast_out(i);
1254       // if we see a membar we keep hold of it. we may also see a new
1255       // arena copy of the original but it will appear later
1256       if (x->is_MemBar()) {
1257           child = x->as_MemBar();
1258           break;
1259       }
1260     }
1261 
1262     if (child == NULL)
1263       return NULL;
1264 
1265     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1266       x = mem->fast_out(i);
1267       // if we see a membar we keep hold of it. we may also see a new
1268       // arena copy of the original but it will appear later
1269       if (x == child) {
1270         return child;
1271       }
1272     }
1273     return NULL;
1274   }
1275 
1276   // helper predicate use to filter candidates for a leading memory
1277   // barrier
1278   //
1279   // returns true if barrier is a MemBarRelease or a MemBarCPUOrder
1280   // whose Ctl and Mem feeds come from a MemBarRelease otherwise false
1281 
1282   bool leading_membar(const MemBarNode *barrier)
1283   {
1284     int opcode = barrier->Opcode();
1285     // if this is a release membar we are ok
1286     if (opcode == Op_MemBarRelease)
1287       return true;
1288     // if its a cpuorder membar . . .
1289     if (opcode != Op_MemBarCPUOrder)
1290       return false;
1291     // then the parent has to be a release membar
1292     MemBarNode *parent = parent_membar(barrier);
1293     if (!parent)
1294       return false;
1295     opcode = parent->Opcode();
1296     return opcode == Op_MemBarRelease;
1297   }
1298  
1299   // 2) card mark detection helper
1300 
1301   // helper predicate which can be used to detect a volatile membar
1302   // introduced as part of a conditional card mark sequence either by
1303   // G1 or by CMS when UseCondCardMark is true.
1304   //
1305   // membar can be definitively determined to be part of a card mark
1306   // sequence if and only if all the following hold
1307   //
1308   // i) it is a MemBarVolatile
1309   //
1310   // ii) either UseG1GC or (UseConcMarkSweepGC && UseCondCardMark) is
1311   // true
1312   //
1313   // iii) the node's Mem projection feeds a StoreCM node.
1314   
1315   bool is_card_mark_membar(const MemBarNode *barrier)
1316   {
1317     if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark))
1318       return false;
1319 
1320     if (barrier->Opcode() != Op_MemBarVolatile)
1321       return false;
1322 
1323     ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
1324 
1325     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax ; i++) {
1326       Node *y = mem->fast_out(i);
1327       if (y->Opcode() == Op_StoreCM) {
1328         return true;
1329       }
1330     }
1331   
1332     return false;
1333   }
1334 
1335 
1336   // 3) helper predicates to traverse volatile put graphs which may
1337   // contain GC barrier subgraphs
1338 
1339   // Preamble
1340   // --------
1341   //
1342   // for volatile writes we can omit generating barriers and employ a
  // releasing store when we see a node sequence with a
1344   // leading MemBarRelease and a trailing MemBarVolatile as follows
1345   //
1346   //   MemBarRelease
1347   //  {      ||      } -- optional
1348   //  {MemBarCPUOrder}
1349   //         ||     \\
1350   //         ||     StoreX[mo_release]
1351   //         | \     /
1352   //         | MergeMem
1353   //         | /
1354   //   MemBarVolatile
1355   //
1356   // where
1357   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1358   //  | \ and / indicate further routing of the Ctl and Mem feeds
1359   // 
1360   // this is the graph we see for non-object stores. however, for a
1361   // volatile Object store (StoreN/P) we may see other nodes below the
1362   // leading membar because of the need for a GC pre- or post-write
1363   // barrier.
1364   //
  // with most GC configurations we will see this simple variant which
1366   // includes a post-write barrier card mark.
1367   //
1368   //   MemBarRelease______________________________
1369   //         ||    \\               Ctl \        \\
1370   //         ||    StoreN/P[mo_release] CastP2X  StoreB/CM
1371   //         | \     /                       . . .  /
1372   //         | MergeMem
1373   //         | /
1374   //         ||      /
1375   //   MemBarVolatile
1376   //
1377   // i.e. the leading membar feeds Ctl to a CastP2X (which converts
1378   // the object address to an int used to compute the card offset) and
1379   // Ctl+Mem to a StoreB node (which does the actual card mark).
1380   //
1381   // n.b. a StoreCM node will only appear in this configuration when
1382   // using CMS. StoreCM differs from a normal card mark write (StoreB)
1383   // because it implies a requirement to order visibility of the card
1384   // mark (StoreCM) relative to the object put (StoreP/N) using a
1385   // StoreStore memory barrier (arguably this ought to be represented
1386   // explicitly in the ideal graph but that is not how it works). This
1387   // ordering is required for both non-volatile and volatile
1388   // puts. Normally that means we need to translate a StoreCM using
1389   // the sequence
1390   //
1391   //   dmb ishst
1392   //   stlrb
1393   //
1394   // However, in the case of a volatile put if we can recognise this
1395   // configuration and plant an stlr for the object write then we can
1396   // omit the dmb and just plant an strb since visibility of the stlr
1397   // is ordered before visibility of subsequent stores. StoreCM nodes
1398   // also arise when using G1 or using CMS with conditional card
1399   // marking. In these cases (as we shall see) we don't need to insert
1400   // the dmb when translating StoreCM because there is already an
1401   // intervening StoreLoad barrier between it and the StoreP/N.
1402   //
1403   // It is also possible to perform the card mark conditionally on it
1404   // currently being unmarked in which case the volatile put graph
1405   // will look slightly different
1406   //
1407   //   MemBarRelease
1408   //   MemBarCPUOrder___________________________________________
1409   //         ||    \\               Ctl \     Ctl \     \\  Mem \
1410   //         ||    StoreN/P[mo_release] CastP2X   If   LoadB     |
1411   //         | \     /                              \            |
1412   //         | MergeMem                            . . .      StoreB
1413   //         | /                                                /
1414   //         ||     /
1415   //   MemBarVolatile
1416   //
1417   // It is worth noting at this stage that both the above
1418   // configurations can be uniquely identified by checking that the
1419   // memory flow includes the following subgraph:
1420   //
1421   //   MemBarRelease
1422   //   MemBarCPUOrder
1423   //          |  \      . . .
1424   //          |  StoreX[mo_release]  . . .
1425   //          |   /
1426   //         MergeMem
1427   //          |
1428   //   MemBarVolatile
1429   //
1430   // This is referred to as a *normal* subgraph. It can easily be
1431   // detected starting from any candidate MemBarRelease,
1432   // StoreX[mo_release] or MemBarVolatile.
1433   //
1434   // the code below uses two helper predicates, leading_to_normal and
1435   // normal_to_leading to identify this configuration, one validating
1436   // the layout starting from the top membar and searching down and
1437   // the other validating the layout starting from the lower membar
1438   // and searching up.
1439   //
1440   // There are two special case GC configurations when a normal graph
1441   // may not be generated: when using G1 (which always employs a
1442   // conditional card mark); and when using CMS with conditional card
1443   // marking configured. These GCs are both concurrent rather than
1444   // stop-the world GCs. So they introduce extra Ctl+Mem flow into the
1445   // graph between the leading and trailing membar nodes, in
  // particular enforcing stronger memory serialisation between the
1447   // object put and the corresponding conditional card mark. CMS
1448   // employs a post-write GC barrier while G1 employs both a pre- and
1449   // post-write GC barrier. Of course the extra nodes may be absent --
1450   // they are only inserted for object puts. This significantly
1451   // complicates the task of identifying whether a MemBarRelease,
1452   // StoreX[mo_release] or MemBarVolatile forms part of a volatile put
1453   // when using these GC configurations (see below).
1454   //
1455   // In both cases the post-write subtree includes an auxiliary
1456   // MemBarVolatile (StoreLoad barrier) separating the object put and
1457   // the read of the corresponding card. This poses two additional
1458   // problems.
1459   //
1460   // Firstly, a card mark MemBarVolatile needs to be distinguished
1461   // from a normal trailing MemBarVolatile. Resolving this first
1462   // problem is straightforward: a card mark MemBarVolatile always
1463   // projects a Mem feed to a StoreCM node and that is a unique marker
1464   //
1465   //      MemBarVolatile (card mark)
1466   //       C |    \     . . .
1467   //         |   StoreCM   . . .
1468   //       . . .
1469   //
1470   // The second problem is how the code generator is to translate the
1471   // card mark barrier? It always needs to be translated to a "dmb
1472   // ish" instruction whether or not it occurs as part of a volatile
1473   // put. A StoreLoad barrier is needed after the object put to ensure
1474   // i) visibility to GC threads of the object put and ii) visibility
1475   // to the mutator thread of any card clearing write by a GC
1476   // thread. Clearly a normal store (str) will not guarantee this
1477   // ordering but neither will a releasing store (stlr). The latter
1478   // guarantees that the object put is visible but does not guarantee
1479   // that writes by other threads have also been observed.
1480   // 
1481   // So, returning to the task of translating the object put and the
1482   // leading/trailing membar nodes: what do the non-normal node graph
1483   // look like for these 2 special cases? and how can we determine the
1484   // status of a MemBarRelease, StoreX[mo_release] or MemBarVolatile
1485   // in both normal and non-normal cases?
1486   //
1487   // A CMS GC post-barrier wraps its card write (StoreCM) inside an If
  // which selects conditional execution based on the value loaded
1489   // (LoadB) from the card. Ctl and Mem are fed to the If via an
1490   // intervening StoreLoad barrier (MemBarVolatile).
1491   //
1492   // So, with CMS we may see a node graph which looks like this
1493   //
1494   //   MemBarRelease
1495   //   MemBarCPUOrder_(leading)__________________
1496   //     C |    M \       \\                   C \
1497   //       |       \    StoreN/P[mo_release]  CastP2X
1498   //       |    Bot \    /
1499   //       |       MergeMem
1500   //       |         /
1501   //      MemBarVolatile (card mark)
1502   //     C |  ||    M |
1503   //       | LoadB    |
1504   //       |   |      |
1505   //       | Cmp      |\
1506   //       | /        | \
1507   //       If         |  \
1508   //       | \        |   \
1509   // IfFalse  IfTrue  |    \
1510   //       \     / \  |     \
1511   //        \   / StoreCM    |
1512   //         \ /      |      |
1513   //        Region   . . .   |
1514   //          | \           /
1515   //          |  . . .  \  / Bot
1516   //          |       MergeMem
1517   //          |          |
1518   //        MemBarVolatile (trailing)
1519   //
1520   // The first MergeMem merges the AliasIdxBot Mem slice from the
1521   // leading membar and the oopptr Mem slice from the Store into the
1522   // card mark membar. The trailing MergeMem merges the AliasIdxBot
1523   // Mem slice from the card mark membar and the AliasIdxRaw slice
1524   // from the StoreCM into the trailing membar (n.b. the latter
1525   // proceeds via a Phi associated with the If region).
1526   //
1527   // G1 is quite a lot more complicated. The nodes inserted on behalf
1528   // of G1 may comprise: a pre-write graph which adds the old value to
1529   // the SATB queue; the releasing store itself; and, finally, a
1530   // post-write graph which performs a card mark.
1531   //
1532   // The pre-write graph may be omitted, but only when the put is
1533   // writing to a newly allocated (young gen) object and then only if
1534   // there is a direct memory chain to the Initialize node for the
1535   // object allocation. This will not happen for a volatile put since
1536   // any memory chain passes through the leading membar.
1537   //
1538   // The pre-write graph includes a series of 3 If tests. The outermost
1539   // If tests whether SATB is enabled (no else case). The next If tests
1540   // whether the old value is non-NULL (no else case). The third tests
1541   // whether the SATB queue index is > 0, if so updating the queue. The
1542   // else case for this third If calls out to the runtime to allocate a
1543   // new queue buffer.
1544   //
1545   // So with G1 the pre-write and releasing store subgraph looks like
1546   // this (the nested Ifs are omitted).
1547   //
1548   //  MemBarRelease (leading)____________
1549   //     C |  ||  M \   M \    M \  M \ . . .
1550   //       | LoadB   \  LoadL  LoadN   \
1551   //       | /        \                 \
1552   //       If         |\                 \
1553   //       | \        | \                 \
1554   //  IfFalse  IfTrue |  \                 \
1555   //       |     |    |   \                 |
1556   //       |     If   |   /\                |
1557   //       |     |          \               |
1558   //       |                 \              |
1559   //       |    . . .         \             |
1560   //       | /       | /       |            |
1561   //      Region  Phi[M]       |            |
1562   //       | \       |         |            |
1563   //       |  \_____ | ___     |            |
1564   //     C | C \     |   C \ M |            |
1565   //       | CastP2X | StoreN/P[mo_release] |
1566   //       |         |         |            |
1567   //     C |       M |       M |          M |
1568   //        \        |         |           /
1569   //                  . . . 
1570   //          (post write subtree elided)
1571   //                    . . .
1572   //             C \         M /
1573   //         MemBarVolatile (trailing)
1574   //
1575   // n.b. the LoadB in this subgraph is not the card read -- it's a
1576   // read of the SATB queue active flag.
1577   //
1578   // The G1 post-write subtree is also optional, this time when the
1579   // new value being written is either null or can be identified as a
1580   // newly allocated (young gen) object with no intervening control
1581   // flow. The latter cannot happen but the former may, in which case
1582   // the card mark membar is omitted and the memory feeds from the
1583   // leading membar and the StoreN/P are merged direct into the
1584   // trailing membar as per the normal subgraph. So, the only special
1585   // case which arises is when the post-write subgraph is generated.
1586   //
1587   // The kernel of the post-write G1 subgraph is the card mark itself
1588   // which includes a card mark memory barrier (MemBarVolatile), a
1589   // card test (LoadB), and a conditional update (If feeding a
1590   // StoreCM). These nodes are surrounded by a series of nested Ifs
1591   // which try to avoid doing the card mark. The top level If skips if
1592   // the object reference does not cross regions (i.e. it tests if
1593   // (adr ^ val) >> log2(regsize) != 0) -- intra-region references
1594   // need not be recorded. The next If, which skips on a NULL value,
1595   // may be absent (it is not generated if the type of value is >=
1596   // OopPtr::NotNull). The 3rd If skips writes to young regions (by
1597   // checking if card_val != young).  n.b. although this test requires
1598   // a pre-read of the card it can safely be done before the StoreLoad
1599   // barrier. However that does not bypass the need to reread the card
1600   // after the barrier.
1601   //
1602   //                (pre-write subtree elided)
1603   //        . . .                  . . .    . . .  . . .
1604   //        C |                    M |     M |    M |
1605   //       Region                  Phi[M] StoreN    |
1606   //          |                     / \      |      |
1607   //         / \_______            /   \     |      |
1608   //      C / C \      . . .            \    |      |
1609   //       If   CastP2X . . .            |   |      |
1610   //       / \                           |   |      |
1611   //      /   \                          |   |      |
1612   // IfFalse IfTrue                      |   |      |
1613   //   |       |                         |   |     /|
1614   //   |       If                        |   |    / |
1615   //   |      / \                        |   |   /  |
1616   //   |     /   \                        \  |  /   |
1617   //   | IfFalse IfTrue                   MergeMem  |
1618   //   |  . . .    / \                       /      |
1619   //   |          /   \                     /       |
1620   //   |     IfFalse IfTrue                /        |
1621   //   |      . . .    |                  /         |
1622   //   |               If                /          |
1623   //   |               / \              /           |
1624   //   |              /   \            /            |
1625   //   |         IfFalse IfTrue       /             |
1626   //   |           . . .   |         /              |
1627   //   |                    \       /               |
1628   //   |                     \     /                |
1629   //   |             MemBarVolatile__(card mark)    |
1630   //   |                ||   C |  M \  M \          |
1631   //   |               LoadB   If    |    |         |
1632   //   |                      / \    |    |         |
1633   //   |                     . . .   |    |         |
1634   //   |                          \  |    |        /
1635   //   |                        StoreCM   |       /
1636   //   |                          . . .   |      /
1637   //   |                        _________/      /
1638   //   |                       /  _____________/
1639   //   |   . . .       . . .  |  /            /
1640   //   |    |                 | /   _________/
1641   //   |    |               Phi[M] /        /
1642   //   |    |                 |   /        /
1643   //   |    |                 |  /        /
1644   //   |  Region  . . .     Phi[M]  _____/
1645   //   |    /                 |    /
1646   //   |                      |   /   
1647   //   | . . .   . . .        |  /
1648   //   | /                    | /
1649   // Region           |  |  Phi[M]
1650   //   |              |  |  / Bot
1651   //    \            MergeMem 
1652   //     \            /
1653   //     MemBarVolatile
1654   //
1655   // As with CMS the initial MergeMem merges the AliasIdxBot Mem slice
1656   // from the leading membar and the oopptr Mem slice from the Store
1657   // into the card mark membar i.e. the memory flow to the card mark
1658   // membar still looks like a normal graph.
1659   //
1660   // The trailing MergeMem merges an AliasIdxBot Mem slice with other
1661   // Mem slices (from the StoreCM and other card mark queue stores).
1662   // However in this case the AliasIdxBot Mem slice does not come
1663   // direct from the card mark membar. It is merged through a series
1664   // of Phi nodes. These are needed to merge the AliasIdxBot Mem flow
1665   // from the leading membar with the Mem feed from the card mark
1666   // membar. Each Phi corresponds to one of the Ifs which may skip
1667   // around the card mark membar. So when the If implementing the NULL
1668   // value check has been elided the total number of Phis is 2
1669   // otherwise it is 3.
1670   //
1671   // So, the upshot is that in all cases the volatile put graph will
1672   // include a *normal* memory subgraph betwen the leading membar and
1673   // its child membar. When that child is not a card mark membar then
1674   // it marks the end of a volatile put subgraph. If the child is a
1675   // card mark membar then the normal subgraph will form part of a
1676   // volatile put subgraph if and only if the child feeds an
1677   // AliasIdxBot Mem feed to a trailing barrier via a MergeMem. That
1678   // feed is either direct (for CMS) or via 2 or 3 Phi nodes merging
1679   // the leading barrier memory flow (for G1).
1680   // 
1681   // The predicates controlling generation of instructions for store
1682   // and barrier nodes employ a few simple helper functions (described
1683   // below) which identify the presence or absence of these subgraph
1684   // configurations and provide a means of traversing from one node in
1685   // the subgraph to another.
1686 
1687   // leading_to_normal
1688   //
  // graph traversal helper which detects the normal case Mem feed
1690   // from a release membar (or, optionally, its cpuorder child) to a
1691   // dependent volatile membar i.e. it ensures that the following Mem
1692   // flow subgraph is present.
1693   //
1694   //   MemBarRelease
1695   //   MemBarCPUOrder
1696   //          |  \      . . .
1697   //          |  StoreN/P[mo_release]  . . .
1698   //          |   /
1699   //         MergeMem
1700   //          |
1701   //   MemBarVolatile
1702   //
1703   // if the correct configuration is present returns the volatile
1704   // membar otherwise NULL.
1705   //
1706   // the input membar is expected to be either a cpuorder membar or a
1707   // release membar. in the latter case it should not have a cpu membar
1708   // child.
1709   //
1710   // the returned membar may be a card mark membar rather than a
1711   // trailing membar.
1712 
1713   MemBarNode *leading_to_normal(MemBarNode *leading)
1714   {
1715     assert((leading->Opcode() == Op_MemBarRelease ||
1716             leading->Opcode() == Op_MemBarCPUOrder),
1717            "expecting a volatile or cpuroder membar!");
1718 
1719     // check the mem flow
1720     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
1721 
1722     if (!mem)
1723       return NULL;
1724 
1725     Node *x = NULL;
1726     StoreNode * st = NULL;
1727     MergeMemNode *mm = NULL;
1728 
1729     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1730       x = mem->fast_out(i);
1731       if (x->is_MergeMem()) {
1732         if (mm != NULL)
1733           return NULL;
1734         // two merge mems is one too many
1735         mm = x->as_MergeMem();
1736       } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
1737         // two releasing stores is one too many
1738         if (st != NULL)
1739           return NULL;
1740         st = x->as_Store();
1741       }
1742     }
1743 
1744     if (!mm || !st)
1745       return NULL;
1746 
1747     bool found = false;
1748     // ensure the store feeds the merge
1749     for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
1750       if (st->fast_out(i) == mm) {
1751         found = true;
1752         break;
1753       }
1754     }
1755 
1756     if (!found)
1757       return NULL;
1758 
1759     MemBarNode *mbvol = NULL;
1760     // ensure the merge feeds a volatile membar
1761     for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
1762       x = mm->fast_out(i);
1763       if (x->is_MemBar() && x->Opcode() == Op_MemBarVolatile) {
1764         mbvol = x->as_MemBar();
1765         break;
1766       }
1767     }
1768 
1769     return mbvol;
1770   }
1771 
1772   // normal_to_leading
1773   //
1774   // graph traversal helper which detects the normal case Mem feed
1775   // from either a card mark or a trailing membar to a preceding
1776   // release membar (optionally its cpuorder child) i.e. it ensures
1777   // that the following Mem flow subgraph is present.
1778   //
1779   //   MemBarRelease
1780   //   MemBarCPUOrder {leading}
1781   //          |  \      . . .
1782   //          |  StoreN/P[mo_release]  . . .
1783   //          |   /
1784   //         MergeMem
1785   //          |
1786   //   MemBarVolatile
1787   //
1788   // this predicate checks for the same flow as the previous predicate
1789   // but starting from the bottom rather than the top.
1790   //
1791   // if the configuration is present returns the cpuorder member for
1792   // preference or when absent the release membar otherwise NULL.
1793   //
1794   // n.b. the input membar is expected to be a MemBarVolatile but
1795   // need not be a card mark membar.
1796 
  MemBarNode *normal_to_leading(const MemBarNode *barrier)
  {
    // input must be a volatile membar
    assert(barrier->Opcode() == Op_MemBarVolatile, "expecting a volatile membar");
    Node *x;

    // the Mem feed to the membar should be a merge
    x = barrier->in(TypeFunc::Memory);
    if (!x->is_MergeMem())
      return NULL;

    MergeMemNode *mm = x->as_MergeMem();

    // the AliasIdxBot slice should be another MemBar projection
    x = mm->in(Compile::AliasIdxBot);
    // ensure this is a non control projection
    if (!x->is_Proj() || x->is_CFG())
      return NULL;
    // if it is fed by a membar that's the one we want
    x = x->in(0);

    if (!x->is_MemBar())
      return NULL;

    MemBarNode *leading = x->as_MemBar();
    // reject invalid candidates (must be a release membar or a
    // cpuorder membar fed by a release membar)
    if (!leading_membar(leading))
      return NULL;

    // ok, we have a leading ReleaseMembar, now for the sanity clauses

    // the leading membar must feed Mem to a releasing store
    // n.b. StoreCM nodes are also marked release, so they are
    // explicitly excluded here
    ProjNode *mem = leading->proj_out(TypeFunc::Memory);
    StoreNode *st = NULL;
    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
        st = x->as_Store();
        break;
      }
    }
    if (st == NULL)
      return NULL;

    // the releasing store has to feed the same merge
    for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
      if (st->fast_out(i) == mm)
        return leading;
    }

    // the store feeds some other merge, so this is not a normal config
    return NULL;
  }
1849 
1850   // card_mark_to_trailing
1851   //
1852   // graph traversal helper which detects extra, non-normal Mem feed
1853   // from a card mark volatile membar to a trailing membar i.e. it
1854   // ensures that one of the following three GC post-write Mem flow
1855   // subgraphs is present.
1856   //
1857   // 1)
1858   //     . . .
1859   //       |
1860   //   MemBarVolatile (card mark)
1861   //      |          |     
1862   //      |        StoreCM
1863   //      |          |
1864   //      |        . . .
1865   //  Bot |  / 
1866   //   MergeMem 
1867   //      |
1868   //   MemBarVolatile (trailing)
1869   //
1870   //
1871   // 2)
1872   //   MemBarRelease/CPUOrder (leading)
1873   //    |
1874   //    | 
1875   //    |\       . . .
1876   //    | \        | 
1877   //    |  \  MemBarVolatile (card mark) 
1878   //    |   \   |     |
1879   //     \   \  |   StoreCM    . . .
1880   //      \   \ |
1881   //       \  Phi
1882   //        \ /
1883   //        Phi  . . .
1884   //     Bot |   /
1885   //       MergeMem
1886   //         |
1887   //   MemBarVolatile (trailing)
1888   //
1889   // 3)
1890   //   MemBarRelease/CPUOrder (leading)
1891   //    |
1892   //    |\
1893   //    | \
1894   //    |  \      . . .
1895   //    |   \       |
1896   //    |\   \  MemBarVolatile (card mark)
1897   //    | \   \   |     |
1898   //    |  \   \  |   StoreCM    . . .
1899   //    |   \   \ |
1900   //     \   \  Phi
1901   //      \   \ /  
1902   //       \  Phi
1903   //        \ /
1904   //        Phi  . . .
1905   //     Bot |   /
1906   //       MergeMem
1907   //         |
1908   //   MemBarVolatile (trailing)
1909   //
1910   // configuration 1 is only valid if UseConcMarkSweepGC &&
1911   // UseCondCardMark
1912   //
1913   // configurations 2 and 3 are only valid if UseG1GC.
1914   //
1915   // if a valid configuration is present returns the trailing membar
1916   // otherwise NULL.
1917   //
1918   // n.b. the supplied membar is expected to be a card mark
1919   // MemBarVolatile i.e. the caller must ensure the input node has the
1920   // correct operand and feeds Mem to a StoreCM node
1921 
1922   MemBarNode *card_mark_to_trailing(const MemBarNode *barrier)
1923   {
1924     // input must be a card mark volatile membar
1925     assert(is_card_mark_membar(barrier), "expecting a card mark membar");
1926 
1927     Node *feed = barrier->proj_out(TypeFunc::Memory);
1928     Node *x;
1929     MergeMemNode *mm = NULL;
1930 
1931     const int MAX_PHIS = 3;     // max phis we will search through
1932     int phicount = 0;           // current search count
1933 
1934     bool retry_feed = true;
1935     while (retry_feed) {
1936       // see if we have a direct MergeMem feed
1937       for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
1938         x = feed->fast_out(i);
1939         // the correct Phi will be merging a Bot memory slice
1940         if (x->is_MergeMem()) {
1941           mm = x->as_MergeMem();
1942           break;
1943         }
1944       }
1945       if (mm) {
1946         retry_feed = false;
1947       } else if (UseG1GC & phicount++ < MAX_PHIS) {
1948         // the barrier may feed indirectly via one or two Phi nodes
1949         PhiNode *phi = NULL;
1950         for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
1951           x = feed->fast_out(i);
1952           // the correct Phi will be merging a Bot memory slice
1953           if (x->is_Phi() && x->adr_type() == TypePtr::BOTTOM) {
1954             phi = x->as_Phi();
1955             break;
1956           }
1957         }
1958         if (!phi)
1959           return NULL;
1960         // look for another merge below this phi
1961         feed = phi;
1962       } else {
1963         // couldn't find a merge
1964         return NULL;
1965       }
1966     }
1967 
1968     // sanity check this feed turns up as the expected slice
1969     assert(mm->as_MergeMem()->in(Compile::AliasIdxBot) == feed, "expecting membar to feed AliasIdxBot slice to Merge");
1970 
1971     MemBarNode *trailing = NULL;
1972     // be sure we have a volatile membar below the merge
1973     for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
1974       x = mm->fast_out(i);
1975       if (x->is_MemBar() && x->Opcode() == Op_MemBarVolatile) {
1976         trailing = x->as_MemBar();
1977         break;
1978       }
1979     }
1980 
1981     return trailing;
1982   }
1983 
1984   // trailing_to_card_mark
1985   //
1986   // graph traversal helper which detects extra, non-normal Mem feed
1987   // from a trailing membar to a preceding card mark volatile membar
1988   // i.e. it identifies whether one of the three possible extra GC
1989   // post-write Mem flow subgraphs is present
1990   //
1991   // this predicate checks for the same flow as the previous predicate
1992   // but starting from the bottom rather than the top.
1993   //
  // if the configuration is present returns the card mark membar
1995   // otherwise NULL
1996 
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing)
  {
    assert(!is_card_mark_membar(trailing), "not expecting a card mark membar");

    Node *x = trailing->in(TypeFunc::Memory);
    // the Mem feed to the membar should be a merge
    if (!x->is_MergeMem())
      return NULL;

    MergeMemNode *mm = x->as_MergeMem();

    x = mm->in(Compile::AliasIdxBot);
    // with G1 we may possibly see a Phi or two before we see a Memory
    // Proj from the card mark membar

    const int MAX_PHIS = 3;     // max phis we will search through
    int phicount = 0;           // current search count

    // if the Bot slice is already a Proj we skip the Phi search
    bool retry_feed = !x->is_Proj();

    while (retry_feed) {
      if (UseG1GC && x->is_Phi() && phicount++ < MAX_PHIS) {
        // examine each input of the Phi looking for either a Memory
        // Proj from a volatile membar (the card mark), a leading
        // membar projection, or a further Phi to recurse into
        PhiNode *phi = x->as_Phi();
        ProjNode *proj = NULL;
        PhiNode *nextphi = NULL;
        bool found_leading = false;
        for (uint i = 1; i < phi->req(); i++) {
          x = phi->in(i);
          if (x->is_Phi()) {
            nextphi = x->as_Phi();
          } else if (x->is_Proj()) {
            int opcode = x->in(0)->Opcode();
            if (opcode == Op_MemBarVolatile) {
              proj = x->as_Proj();
            } else if (opcode == Op_MemBarRelease ||
                       opcode == Op_MemBarCPUOrder) {
              // probably a leading membar
              found_leading = true;
            }
          }
        }
        // if we found a correct looking proj then retry from there;
        // otherwise we must have seen a leading membar and a phi, or
        // else this is the wrong config
        if (proj != NULL) {
          x = proj;
          retry_feed = false;
        } else if (found_leading && nextphi != NULL) {
          // retry from this phi to check phi2
          x = nextphi;
        } else {
          // not what we were looking for
          return NULL;
        }
      } else {
        // not a Phi chain we recognize (or too many phis)
        return NULL;
      }
    }
    // the proj has to come from the card mark membar
    x = x->in(0);
    if (!x->is_MemBar())
      return NULL;

    MemBarNode *card_mark_membar = x->as_MemBar();

    if (!is_card_mark_membar(card_mark_membar))
      return NULL;

    return card_mark_membar;
  }
2067 
2068   // trailing_to_leading
2069   //
2070   // graph traversal helper which checks the Mem flow up the graph
2071   // from a (non-card mark) volatile membar attempting to locate and
2072   // return an associated leading membar. it first looks for a
2073   // subgraph in the normal configuration (relying on helper
2074   // normal_to_leading). failing that it then looks for one of the
2075   // possible post-write card mark subgraphs linking the trailing node
2076   // to a the card mark membar (relying on helper
2077   // trailing_to_card_mark), and then checks that the card mark membar
2078   // is fed by a leading membar (once again relying on auxiliary
2079   // predicate normal_to_leading).
2080   //
2081   // if the configuration is valid returns the cpuorder member for
2082   // preference or when absent the release membar otherwise NULL.
2083   //
2084   // n.b. the input membar is expected to be a volatile membar but
2085   // must *not* be a card mark membar.
2086 
2087   MemBarNode *trailing_to_leading(const MemBarNode *trailing)
2088   {
2089     assert(!is_card_mark_membar(trailing), "not expecting a card mark membar");
2090 
2091     MemBarNode *leading = normal_to_leading(trailing);
2092 
2093     if (leading)
2094       return leading;
2095 
2096     MemBarNode *card_mark_membar = trailing_to_card_mark(trailing);
2097 
2098     if (!card_mark_membar)
2099       return NULL;
2100 
2101     return normal_to_leading(card_mark_membar);
2102   }
2103 
2104   // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
2105 
bool unnecessary_acquire(const Node *barrier)
{
  // assert barrier->is_MemBar();
  if (UseBarriersForVolatile)
    // we need to plant a dmb
    return false;

  // a volatile read derived from bytecode (or also from an inlined
  // SHA field read via LibraryCallKit::load_field_from_object)
  // manifests as a LoadX[mo_acquire] followed by an acquire membar
  // with a bogus read dependency on its preceding load. so in those
  // cases we will find the load node at the PARMS offset of the
  // acquire membar.  n.b. there may be an intervening DecodeN node.
  //
  // a volatile load derived from an inlined unsafe field access
  // manifests as a cpuorder membar with Ctl and Mem projections
  // feeding both an acquire membar and a LoadX[mo_acquire]. The
  // acquire then feeds another cpuorder membar via Ctl and Mem
  // projections. The load has no output dependency on these trailing
  // membars because subsequent nodes inserted into the graph take
  // their control feed from the final membar cpuorder meaning they
  // are all ordered after the load.

  Node *x = barrier->lookup(TypeFunc::Parms);
  if (x) {
    // we are starting from an acquire and it has a fake dependency
    //
    // need to check for
    //
    //   LoadX[mo_acquire]
    //   {  |1   }
    //   {DecodeN}
    //      |Parms
    //   MemBarAcquire*
    //
    // where * tags node we were passed
    // and |k means input k
    if (x->is_DecodeNarrowPtr())
      x = x->in(1);

    return (x->is_Load() && x->as_Load()->is_acquire());
  }

  // now check for an unsafe volatile get

  // need to check for
  //
  //   MemBarCPUOrder
  //        ||       \\
  //   MemBarAcquire* LoadX[mo_acquire]
  //        ||
  //   MemBarCPUOrder
  //
  // where * tags node we were passed
  // and || or \\ are Ctl+Mem feeds via intermediate Proj Nodes

  // check for a parent MemBarCPUOrder
  ProjNode *ctl;
  ProjNode *mem;
  MemBarNode *parent = parent_membar(barrier);
  if (!parent || parent->Opcode() != Op_MemBarCPUOrder)
    return false;
  ctl = parent->proj_out(TypeFunc::Control);
  mem = parent->proj_out(TypeFunc::Memory);
  if (!ctl || !mem)
    return false;
  // ensure the proj nodes both feed a LoadX[mo_acquire]
  LoadNode *ld = NULL;
  for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
    x = ctl->fast_out(i);
    // if we see a load we keep hold of it and stop searching
    if (x->is_Load()) {
      ld = x->as_Load();
      break;
    }
  }
  // it must be an acquiring load
  if (! ld || ! ld->is_acquire())
    return false;
  // the same load must also be fed Memory by the parent's Mem proj
  for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
    x = mem->fast_out(i);
    // if we see the same load we drop it and stop searching
    if (x == ld) {
      ld = NULL;
      break;
    }
  }
  // we must have dropped the load
  if (ld)
    return false;
  // check for a child cpuorder membar
  MemBarNode *child  = child_membar(barrier->as_MemBar());
  if (!child || child->Opcode() != Op_MemBarCPUOrder)
    return false;

  return true;
}
2203 
2204 bool needs_acquiring_load(const Node *n)
2205 {
2206   // assert n->is_Load();
2207   if (UseBarriersForVolatile)
2208     // we use a normal load and a dmb
2209     return false;
2210 
2211   LoadNode *ld = n->as_Load();
2212 
2213   if (!ld->is_acquire())
2214     return false;
2215 
2216   // check if this load is feeding an acquire membar
2217   //
2218   //   LoadX[mo_acquire]
2219   //   {  |1   }
2220   //   {DecodeN}
2221   //      |Parms
2222   //   MemBarAcquire*
2223   //
2224   // where * tags node we were passed
2225   // and |k means input k
2226 
2227   Node *start = ld;
2228   Node *mbacq = NULL;
2229 
2230   // if we hit a DecodeNarrowPtr we reset the start node and restart
2231   // the search through the outputs
2232  restart:
2233 
2234   for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
2235     Node *x = start->fast_out(i);
2236     if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
2237       mbacq = x;
2238     } else if (!mbacq &&
2239                (x->is_DecodeNarrowPtr() ||
2240                 (x->is_Mach() && x->Opcode() == Op_DecodeN))) {
2241       start = x;
2242       goto restart;
2243     }
2244   }
2245 
2246   if (mbacq) {
2247     return true;
2248   }
2249 
2250   // now check for an unsafe volatile get
2251 
2252   // check if Ctl and Proj feed comes from a MemBarCPUOrder
2253   //
2254   //     MemBarCPUOrder
2255   //        ||       \\
2256   //   MemBarAcquire* LoadX[mo_acquire]
2257   //        ||
2258   //   MemBarCPUOrder
2259 
2260   MemBarNode *membar;
2261 
2262   membar = parent_membar(ld);
2263 
2264   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder)
2265     return false;
2266 
2267   // ensure that there is a CPUOrder->Acquire->CPUOrder membar chain
2268 
2269   membar = child_membar(membar);
2270 
2271   if (!membar || !membar->Opcode() == Op_MemBarAcquire)
2272     return false;
2273 
2274   membar = child_membar(membar);
2275   
2276   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder)
2277     return false;
2278 
2279   return true;
2280 }
2281 
2282 bool unnecessary_release(const Node *n)
2283 {
2284   assert((n->is_MemBar() &&
2285           n->Opcode() == Op_MemBarRelease),
2286          "expecting a release membar");
2287 
2288   if (UseBarriersForVolatile)
2289     // we need to plant a dmb
2290     return false;
2291 
2292   // if there is a dependent CPUOrder barrier then use that as the
2293   // leading
2294 
2295   MemBarNode *barrier = n->as_MemBar();
2296   // check for an intervening cpuorder membar
2297   MemBarNode *b = child_membar(barrier);
2298   if (b && b->Opcode() == Op_MemBarCPUOrder) {
2299     // ok, so start the check from the dependent cpuorder barrier
2300     barrier = b;
2301   }
2302 
2303   // must start with a normal feed
2304   MemBarNode *child_barrier = leading_to_normal(barrier);
2305 
2306   if (!child_barrier)
2307     return false;
2308 
2309   if (!is_card_mark_membar(child_barrier))
2310     // this is the trailing membar and we are done
2311     return true;
2312 
2313   // must be sure this card mark feeds a trailing membar
2314   MemBarNode *trailing = card_mark_to_trailing(child_barrier);
2315   return (trailing != NULL);
2316 }
2317 
2318 bool unnecessary_volatile(const Node *n)
2319 {
2320   // assert n->is_MemBar();
2321   if (UseBarriersForVolatile)
2322     // we need to plant a dmb
2323     return false;
2324 
2325   MemBarNode *mbvol = n->as_MemBar();
2326 
2327   // first we check if this is part of a card mark. if so then we have
2328   // to generate a StoreLoad barrier
2329   
2330   if (is_card_mark_membar(mbvol))
2331       return false;
2332 
2333   // ok, if it's not a card mark then we still need to check if it is
2334   // a trailing membar of a volatile put hgraph.
2335 
2336   return (trailing_to_leading(mbvol) != NULL);
2337 }
2338 
2339 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
2340 
2341 bool needs_releasing_store(const Node *n)
2342 {
2343   // assert n->is_Store();
2344   if (UseBarriersForVolatile)
2345     // we use a normal store and dmb combination
2346     return false;
2347 
2348   StoreNode *st = n->as_Store();
2349 
2350   // the store must be marked as releasing
2351   if (!st->is_release())
2352     return false;
2353 
2354   // the store must be fed by a membar
2355 
2356   Node *x = st->lookup(StoreNode::Memory);
2357 
2358   if (! x || !x->is_Proj())
2359     return false;
2360 
2361   ProjNode *proj = x->as_Proj();
2362 
2363   x = proj->lookup(0);
2364 
2365   if (!x || !x->is_MemBar())
2366     return false;
2367 
2368   MemBarNode *barrier = x->as_MemBar();
2369 
2370   // if the barrier is a release membar or a cpuorder mmebar fed by a
2371   // release membar then we need to check whether that forms part of a
2372   // volatile put graph.
2373 
2374   // reject invalid candidates
2375   if (!leading_membar(barrier))
2376     return false;
2377 
2378   // does this lead a normal subgraph?
2379   MemBarNode *mbvol = leading_to_normal(barrier);
2380 
2381   if (!mbvol)
2382     return false;
2383 
2384   // all done unless this is a card mark
2385   if (!is_card_mark_membar(mbvol))
2386     return true;
2387   
2388   // we found a card mark -- just make sure we have a trailing barrier
2389 
2390   return (card_mark_to_trailing(mbvol) != NULL);
2391 }
2392 
2393 // predicate controlling translation of StoreCM
2394 //
2395 // returns true if a StoreStore must precede the card write otherwise
2396 // false
2397 
2398 bool unnecessary_storestore(const Node *storecm)
2399 {
2400   assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");
2401 
2402   // we only ever need to generate a dmb ishst between an object put
2403   // and the associated card mark when we are using CMS without
2404   // conditional card marking
2405 
2406   if (!UseConcMarkSweepGC || UseCondCardMark)
2407     return true;
2408 
2409   // if we are implementing volatile puts using barriers then the
2410   // object put as an str so we must insert the dmb ishst
2411 
2412   if (UseBarriersForVolatile)
2413     return false;
2414 
2415   // we can omit the dmb ishst if this StoreCM is part of a volatile
2416   // put because in thta case the put will be implemented by stlr
2417   //
2418   // we need to check for a normal subgraph feeding this StoreCM.
2419   // that means the StoreCM must be fed Memory from a leading membar,
2420   // either a MemBarRelease or its dependent MemBarCPUOrder, and the
2421   // leading membar must be part of a normal subgraph
2422 
2423   Node *x = storecm->in(StoreNode::Memory);
2424 
2425   if (!x->is_Proj())
2426     return false;
2427 
2428   x = x->in(0);
2429 
2430   if (!x->is_MemBar())
2431     return false;
2432 
2433   MemBarNode *leading = x->as_MemBar();
2434 
2435   // reject invalid candidates
2436   if (!leading_membar(leading))
2437     return false;
2438 
2439   // we can omit the StoreStore if it is the head of a normal subgraph
2440   return (leading_to_normal(leading) != NULL);
2441 }
2442 
2443 
2444 #define __ _masm.
2445 
2446 // advance declarations for helper functions to convert register
2447 // indices to register objects
2448 
2449 // the ad file has to provide implementations of certain methods
2450 // expected by the generic code
2451 //
2452 // REQUIRED FUNCTIONALITY
2453 
2454 //=============================================================================
2455 
2456 // !!!!! Special hack to get all types of calls to specify the byte offset
2457 //       from the start of the call to the point where the return address
2458 //       will point.
2459 
2460 int MachCallStaticJavaNode::ret_addr_offset()
2461 {
2462   // call should be a simple bl
2463   int off = 4;
2464   return off;
2465 }
2466 
2467 int MachCallDynamicJavaNode::ret_addr_offset()
2468 {
2469   return 16; // movz, movk, movk, bl
2470 }
2471 
2472 int MachCallRuntimeNode::ret_addr_offset() {
2473   // for generated stubs the call will be
2474   //   far_call(addr)
2475   // for real runtime callouts it will be six instructions
2476   // see aarch64_enc_java_to_runtime
2477   //   adr(rscratch2, retaddr)
2478   //   lea(rscratch1, RuntimeAddress(addr)
2479   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
2480   //   blrt rscratch1
2481   CodeBlob *cb = CodeCache::find_blob(_entry_point);
2482   if (cb) {
2483     return MacroAssembler::far_branch_size();
2484   } else {
2485     return 6 * NativeInstruction::instruction_size;
2486   }
2487 }
2488 
2489 // Indicate if the safepoint node needs the polling page as an input
2490 
2491 // the shared code plants the oop data at the start of the generated
2492 // code for the safepoint node and that needs ot be at the load
2493 // instruction itself. so we cannot plant a mov of the safepoint poll
2494 // address followed by a load. setting this to true means the mov is
2495 // scheduled as a prior instruction. that's better for scheduling
2496 // anyway.
2497 
bool SafePointNode::needs_polling_address_input()
{
  // true so the safepoint poll address is materialized by a separate,
  // earlier mov, keeping the oop map data on the load instruction itself
  return true;
}
2502 
2503 //=============================================================================
2504 
2505 #ifndef PRODUCT
// print the assembly listing entry for a breakpoint node
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
2509 #endif
2510 
// emit a breakpoint as a brk instruction with immediate 0
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  __ brk(0);
}
2515 
// defer to the generic size computation for the emitted breakpoint
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
2519 
2520 //=============================================================================
2521 
2522 #ifndef PRODUCT
  // print the assembly listing entry for a nop padding sequence
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
2526 #endif
2527 
2528   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
2529     MacroAssembler _masm(&cbuf);
2530     for (int i = 0; i < _count; i++) {
2531       __ nop();
2532     }
2533   }
2534 
  // size is one instruction per requested nop
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
2538 
2539 //=============================================================================
// the constant base node produces no value so it needs no output register
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}
2545 
2546 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
// never called: requires_postalloc_expand() returns false
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
2550 
// the constant base node emits no instructions (absolute addressing)
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}
2554 
// zero size to match the empty encoding above
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}
2558 
2559 #ifndef PRODUCT
// print a placeholder listing entry for the (empty) constant base node
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
2563 #endif
2564 
2565 #ifndef PRODUCT
// print the assembly listing entry for the method prolog, mirroring
// the two frame-build strategies used by MachPrologNode::emit
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  if (framesize < ((1 << 9) + 2 * wordSize)) {
    // small frame: a single immediate-offset sub fits
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    // large frame: push the link area first then drop sp via rscratch1
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
2585 #endif
2586 
// Emit the method prolog: patchable entry nop, optional stack bang,
// frame construction, and (when running on the ARM simulator) an
// entry notification.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  // Touch pages beyond the current stack frame so overflow is detected
  // eagerly rather than inside a callee.
  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  // Mark the point at which the frame is fully set up; needed for
  // correct stack walking during the prolog.
  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
2622 
// Prolog size depends on frame size, banging, and simulator mode, so
// compute it generically by emitting into a scratch buffer.
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

// The prolog contains no relocatable constants.
int MachPrologNode::reloc() const
{
  return 0;
}
2633 
2634 //=============================================================================
2635 
#ifndef PRODUCT
// Pretty-print the method epilog; mirrors MachEpilogNode::emit:
// restore lr/rfp, pop the frame, then (for method compiles) touch the
// polling page to allow safepoints at returns.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    // Only the saved pair on the stack: post-indexed pop.
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    // Frame size fits an immediate add.
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    // Large frame: materialize the adjustment in rscratch1.
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
#endif
2661 
// Emit the method epilog: tear down the frame, notify the simulator of
// re-entry if applicable, and read the safepoint polling page.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  if (do_polling() && C->is_method_compilation()) {
    // Return-point safepoint poll; faults if a safepoint is pending.
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
2677 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}

// This method seems to be obsolete. It is declared in machnode.hpp
// and defined in all *.ad files, but it is never called. Should we
// get rid of it?
int MachEpilogNode::safepoint_offset() const {
  assert(do_polling(), "no return for this epilog node");
  return 4;
}
2699 
2700 //=============================================================================
2701 
2702 // Figure out which register class each belongs in: rc_int, rc_float or
2703 // rc_stack.
2704 enum RC { rc_bad, rc_int, rc_float, rc_stack };
2705 
2706 static enum RC rc_class(OptoReg::Name reg) {
2707 
2708   if (reg == OptoReg::Bad) {
2709     return rc_bad;
2710   }
2711 
2712   // we have 30 int registers * 2 halves
2713   // (rscratch1 and rscratch2 are omitted)
2714 
2715   if (reg < 60) {
2716     return rc_int;
2717   }
2718 
2719   // we have 32 float register * 2 halves
2720   if (reg < 60 + 128) {
2721     return rc_float;
2722   }
2723 
2724   // Between float regs & stack is the flags regs.
2725   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
2726 
2727   return rc_stack;
2728 }
2729 
2730 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
2731   Compile* C = ra_->C;
2732 
2733   // Get registers to move.
2734   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
2735   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
2736   OptoReg::Name dst_hi = ra_->get_reg_second(this);
2737   OptoReg::Name dst_lo = ra_->get_reg_first(this);
2738 
2739   enum RC src_hi_rc = rc_class(src_hi);
2740   enum RC src_lo_rc = rc_class(src_lo);
2741   enum RC dst_hi_rc = rc_class(dst_hi);
2742   enum RC dst_lo_rc = rc_class(dst_lo);
2743 
2744   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
2745 
2746   if (src_hi != OptoReg::Bad) {
2747     assert((src_lo&1)==0 && src_lo+1==src_hi &&
2748            (dst_lo&1)==0 && dst_lo+1==dst_hi,
2749            "expected aligned-adjacent pairs");
2750   }
2751 
2752   if (src_lo == dst_lo && src_hi == dst_hi) {
2753     return 0;            // Self copy, no move.
2754   }
2755 
2756   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
2757               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
2758   int src_offset = ra_->reg2offset(src_lo);
2759   int dst_offset = ra_->reg2offset(dst_lo);
2760 
2761   if (bottom_type()->isa_vect() != NULL) {
2762     uint ireg = ideal_reg();
2763     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
2764     if (cbuf) {
2765       MacroAssembler _masm(cbuf);
2766       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
2767       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
2768         // stack->stack
2769         assert((src_offset & 7) && (dst_offset & 7), "unaligned stack offset");
2770         if (ireg == Op_VecD) {
2771           __ unspill(rscratch1, true, src_offset);
2772           __ spill(rscratch1, true, dst_offset);
2773         } else {
2774           __ spill_copy128(src_offset, dst_offset);
2775         }
2776       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
2777         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2778                ireg == Op_VecD ? __ T8B : __ T16B,
2779                as_FloatRegister(Matcher::_regEncode[src_lo]));
2780       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
2781         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
2782                        ireg == Op_VecD ? __ D : __ Q,
2783                        ra_->reg2offset(dst_lo));
2784       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
2785         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2786                        ireg == Op_VecD ? __ D : __ Q,
2787                        ra_->reg2offset(src_lo));
2788       } else {
2789         ShouldNotReachHere();
2790       }
2791     }
2792   } else if (cbuf) {
2793     MacroAssembler _masm(cbuf);
2794     switch (src_lo_rc) {
2795     case rc_int:
2796       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
2797         if (is64) {
2798             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
2799                    as_Register(Matcher::_regEncode[src_lo]));
2800         } else {
2801             MacroAssembler _masm(cbuf);
2802             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
2803                     as_Register(Matcher::_regEncode[src_lo]));
2804         }
2805       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
2806         if (is64) {
2807             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2808                      as_Register(Matcher::_regEncode[src_lo]));
2809         } else {
2810             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2811                      as_Register(Matcher::_regEncode[src_lo]));
2812         }
2813       } else {                    // gpr --> stack spill
2814         assert(dst_lo_rc == rc_stack, "spill to bad register class");
2815         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
2816       }
2817       break;
2818     case rc_float:
2819       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
2820         if (is64) {
2821             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
2822                      as_FloatRegister(Matcher::_regEncode[src_lo]));
2823         } else {
2824             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
2825                      as_FloatRegister(Matcher::_regEncode[src_lo]));
2826         }
2827       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
2828           if (cbuf) {
2829             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2830                      as_FloatRegister(Matcher::_regEncode[src_lo]));
2831         } else {
2832             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2833                      as_FloatRegister(Matcher::_regEncode[src_lo]));
2834         }
2835       } else {                    // fpr --> stack spill
2836         assert(dst_lo_rc == rc_stack, "spill to bad register class");
2837         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
2838                  is64 ? __ D : __ S, dst_offset);
2839       }
2840       break;
2841     case rc_stack:
2842       if (dst_lo_rc == rc_int) {  // stack --> gpr load
2843         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
2844       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
2845         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2846                    is64 ? __ D : __ S, src_offset);
2847       } else {                    // stack --> stack copy
2848         assert(dst_lo_rc == rc_stack, "spill to bad register class");
2849         __ unspill(rscratch1, is64, src_offset);
2850         __ spill(rscratch1, is64, dst_offset);
2851       }
2852       break;
2853     default:
2854       assert(false, "bad rc_class for spill");
2855       ShouldNotReachHere();
2856     }
2857   }
2858 
2859   if (st) {
2860     st->print("spill ");
2861     if (src_lo_rc == rc_stack) {
2862       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
2863     } else {
2864       st->print("%s -> ", Matcher::regName[src_lo]);
2865     }
2866     if (dst_lo_rc == rc_stack) {
2867       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
2868     } else {
2869       st->print("%s", Matcher::regName[dst_lo]);
2870     }
2871     if (bottom_type()->isa_vect() != NULL) {
2872       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
2873     } else {
2874       st->print("\t# spill size = %d", is64 ? 64:32);
2875     }
2876   }
2877 
2878   return 0;
2879 
2880 }
2881 
#ifndef PRODUCT
// Format a spill copy; without register assignments just name the node,
// otherwise delegate to implementation() in format-only mode.
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(NULL, ra_, false, st);
}
#endif
2890 
// Emit the spill copy; all logic lives in implementation().
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}

// Size varies with the source/destination classes; compute generically.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
2898 
2899 //=============================================================================
2900 
2901 #ifndef PRODUCT
2902 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
2903   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
2904   int reg = ra_->get_reg_first(this);
2905   st->print("add %s, rsp, #%d]\t# box lock",
2906             Matcher::regName[reg], offset);
2907 }
2908 #endif
2909 
// Emit the box-lock address computation: dst = sp + frame offset of
// the monitor box slot.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  // Only a single add is emitted (see size() below); an offset that
  // does not fit the add immediate is treated as a fatal error here.
  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    ShouldNotReachHere();
  }
}

uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  return 4;
}
2927 
2928 //=============================================================================
2929 
#ifndef PRODUCT
// Pretty-print the unverified entry point: load the receiver's klass,
// compare with the inline-cache klass, branch to the miss stub on
// mismatch.  Mirrors MachUEPNode::emit below.
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  st->print_cr("# MachUEPNode");
  if (UseCompressedClassPointers) {
    st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
    if (Universe::narrow_klass_shift() != 0) {
      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
    }
  } else {
   st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
  }
  st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
  st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
#endif
2946 
// Emit the inline-cache check at the unverified entry point: compare
// the receiver's klass (loaded via j_rarg0) against the expected klass
// in rscratch2 and jump to the IC miss stub on mismatch.
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}

// Size depends on far-branch code shape; compute it generically.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
2965 
2966 // REQUIRED EMIT CODE
2967 
2968 //=============================================================================
2969 
// Emit exception handler code.
// Returns the offset of the handler within the stub section, or 0 if
// the code cache is full.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2989 
// Emit deopt handler code.
// Returns the offset of the handler within the stub section, or 0 if
// the code cache is full.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // Save the current pc in lr so the unpacker knows where we came
  // from, then jump to the deopt blob.
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
3010 
3011 // REQUIRED MATCHER CODE
3012 
3013 //=============================================================================
3014 
3015 const bool Matcher::match_rule_supported(int opcode) {
3016 
3017   // TODO
3018   // identify extra cases that we might want to provide match rules for
3019   // e.g. Op_StrEquals and other intrinsics
3020   if (!has_match_rule(opcode)) {
3021     return false;
3022   }
3023 
3024   return true;  // Per default match rules are supported.
3025 }
3026 
// No scaling of float register pressure on this platform.
const int Matcher::float_pressure_scale(void) {
  return 1;
}

// Not used on AArch64; must never be called.
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}

// Short-branch optimization is not implemented on this platform.
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset)
{
  Unimplemented();
  return false;
}
3042 
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}

// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
3053 
3054 // Vector width in bytes.
3055 const int Matcher::vector_width_in_bytes(BasicType bt) {
3056   int size = MIN2(16,(int)MaxVectorSize);
3057   // Minimum 2 values in vector
3058   if (size < 2*type2aelembytes(bt)) size = 0;
3059   // But never < 4
3060   if (size < 4) size = 0;
3061   return size;
3062 }
3063 
3064 // Limits on vector size (number of elements) loaded into vector.
3065 const int Matcher::max_vector_size(const BasicType bt) {
3066   return vector_width_in_bytes(bt)/type2aelembytes(bt);
3067 }
3068 const int Matcher::min_vector_size(const BasicType bt) {
3069 //  For the moment limit the vector size to 8 bytes
3070     int size = 8 / type2aelembytes(bt);
3071     if (size < 2) size = 2;
3072     return size;
3073 }
3074 
3075 // Vector ideal reg.
3076 const int Matcher::vector_ideal_reg(int len) {
3077   switch(len) {
3078     case  8: return Op_VecD;
3079     case 16: return Op_VecX;
3080   }
3081   ShouldNotReachHere();
3082   return 0;
3083 }
3084 
3085 const int Matcher::vector_shift_count_ideal_reg(int size) {
3086   return Op_VecX;
3087 }
3088 
// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  return false;
}

// Misaligned vector loads/stores are allowed unless -XX:+AlignVector
// forces alignment.  (Comment originally referred to x86; the same
// policy applies here.)
const bool Matcher::misaligned_vectors_ok() {
  return !AlignVector; // can be changed by flag
}
3098 
// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Threshold size for cleararray.
const int Matcher::init_array_short_size = 18 * BytesPerLong;

// Use conditional move (CMOVL)
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}

const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?  True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// AArch64 shift instructions only use the low bits of the count.
const bool Matcher::need_masked_shift_count = false;

// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  return Universe::narrow_oop_shift() == 0;
}

bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}

// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;

// Not needed on AArch64 (comment inherited from the amd64 port);
// should never be called here.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}

// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return true; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
3179 
// Return whether or not this register is ever used as an argument.
// This function is used on startup to build the trampoline stubs in
// generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers not be
// available to the callee.
// On AArch64 the Java argument registers are r0-r7 and v0-v7
// (both 32-bit halves of each).
bool Matcher::can_be_java_arg(int reg)
{
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}

// Any Java argument register may be used for spilling.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
3210 
// No hand-written assembly path for long division by constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}

// The div/mod projection masks below are only used on platforms with a
// combined divmod instruction (e.g. x86); they must never be queried
// on AArch64.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// SP is saved in the frame pointer register across method-handle invokes.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
3241 
3242 // helper for encoding java_to_runtime calls on sim
3243 //
3244 // this is needed to compute the extra arguments required when
3245 // planting a call to the simulator blrt instruction. the TypeFunc
3246 // can be queried to identify the counts for integral, and floating
3247 // arguments and the return type
3248 
3249 static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
3250 {
3251   int gps = 0;
3252   int fps = 0;
3253   const TypeTuple *domain = tf->domain();
3254   int max = domain->cnt();
3255   for (int i = TypeFunc::Parms; i < max; i++) {
3256     const Type *t = domain->field_at(i);
3257     switch(t->basic_type()) {
3258     case T_FLOAT:
3259     case T_DOUBLE:
3260       fps++;
3261     default:
3262       gps++;
3263     }
3264   }
3265   gpcnt = gps;
3266   fpcnt = fps;
3267   BasicType rt = tf->return_type();
3268   switch (rt) {
3269   case T_VOID:
3270     rtype = MacroAssembler::ret_type_void;
3271     break;
3272   default:
3273     rtype = MacroAssembler::ret_type_integral;
3274     break;
3275   case T_FLOAT:
3276     rtype = MacroAssembler::ret_type_float;
3277     break;
3278   case T_DOUBLE:
3279     rtype = MacroAssembler::ret_type_double;
3280     break;
3281   }
3282 }
3283 
// Emit a volatile (acquire/release) move.  Volatile accesses only
// support a plain base-register addressing mode, which the guarantees
// below enforce.  NOTE: function-like macro, evaluates its arguments
// more than once; call only with side-effect-free arguments.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }

// Member-function-pointer types for the loadStore helpers below:
// integer, float, and SIMD/vector memory access instructions.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
3297 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  static void loadStore(MacroAssembler masm, mem_insn insn,
                         Register reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDOFFSETI2L:
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDOFFSETI2LN:
    case INDINDEXSCALEDI2LN:
    case INDINDEXOFFSETI2L:
    case INDINDEXOFFSETI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // No index register: plain base + displacement.
      (masm.*insn)(reg, Address(base, disp));
    } else {
      if (disp == 0) {
        (masm.*insn)(reg, Address(base, as_Register(index), scale));
      } else {
        // Base + index + displacement needs an extra step: fold the
        // displacement into rscratch1 first.
        masm.lea(rscratch1, Address(base, disp));
        (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
      }
    }
  }
3334 
  // Float-register variant of loadStore above; same addressing-mode
  // dispatch, but without the INDINDEXOFFSET cases (those patterns do
  // not exist for float accesses).
  static void loadStore(MacroAssembler masm, mem_float_insn insn,
                         FloatRegister reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDOFFSETI2L:
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDOFFSETI2LN:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

     if (index == -1) {
      (masm.*insn)(reg, Address(base, disp));
    } else {
      if (disp == 0) {
        (masm.*insn)(reg, Address(base, as_Register(index), scale));
      } else {
        // Fold the displacement into rscratch1 before applying the index.
        masm.lea(rscratch1, Address(base, disp));
        (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
      }
    }
  }
3363 
  // Vector variant: only base+disp or base+scaled-index addressing is
  // supported (never both an index and a displacement).
  static void loadStore(MacroAssembler masm, mem_vector_insn insn,
                         FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                         int opcode, Register base, int index, int size, int disp)
  {
    if (index == -1) {
      (masm.*insn)(reg, T, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode");
      (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
3375 
3376 %}
3377 
3378 
3379 
3380 //----------ENCODING BLOCK-----------------------------------------------------
3381 // This block specifies the encoding classes used by the compiler to
3382 // output byte streams.  Encoding classes are parameterized macros
3383 // used by Machine Instruction Nodes in order to generate the bit
3384 // encoding of the instruction.  Operands specify their base encoding
3385 // interface with the interface keyword.  There are currently
3386 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
3387 // COND_INTER.  REG_INTER causes an operand to generate a function
3388 // which returns its register number when queried.  CONST_INTER causes
3389 // an operand to generate a function which returns the value of the
3390 // constant when queried.  MEMORY_INTER causes an operand to generate
3391 // four functions which return the Base Register, the Index Register,
3392 // the Scale Value, and the Offset Value of the operand when queried.
3393 // COND_INTER causes an operand to generate six functions which return
3394 // the encoding code (ie - encoding bits for the instruction)
3395 // associated with each basic boolean condition for a conditional
3396 // instruction.
3397 //
3398 // Instructions specify two basic values for encoding.  Again, a
3399 // function is available to check if the constant displacement is an
3400 // oop. They use the ins_encode keyword to specify their encoding
3401 // classes (which must be a sequence of enc_class names, and their
3402 // parameters, specified in the encoding block), and they use the
3403 // opcode keyword to specify, in order, their primary, secondary, and
3404 // tertiary opcode.  Only the opcode sections which a particular
3405 // instruction needs for encoding need to be specified.
3406 encode %{
3407   // Build emit functions for each basic byte or larger field in the
3408   // intel encoding scheme (opcode, rm, sib, immediate), and call them
3409   // from C++ code in the enc_class source block.  Emit functions will
3410   // live in the main source block for now.  In future, we can
3411   // generalize this by adding a syntax that specifies the sizes of
3412   // fields in an order, so that the adlc can build the emit functions
3413   // automagically
3414 
3415   // catch all for unimplemented encodings
  // Catch-all encoding.  Emits MacroAssembler::unimplemented with the
  // message "C2 catch all"; used as a placeholder for instruct rules
  // whose real encoding has not been written yet.
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
3420 
3421   // BEGIN Non-volatile memory access
3422 
  // Plain (non-volatile) load encodings.  Each one resolves the memory
  // operand's base register, index, scale and displacement and passes
  // them, together with the matching MacroAssembler load routine, to
  // the loadStore() helper, which forms the address and emits the
  // instruction.

  // load signed byte into a 32-bit register (LDRSB Wt)
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load signed byte, sign-extended to 64 bits (LDRSB Xt)
  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load byte, zero-extended, into a 32-bit register
  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load byte, zero-extended, into a 64-bit register
  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load signed halfword into a 32-bit register (LDRSH Wt)
  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load signed halfword, sign-extended to 64 bits (LDRSH Xt)
  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load halfword, zero-extended, into a 32-bit register
  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load halfword, zero-extended, into a 64-bit register
  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 32-bit word
  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 32-bit word, zero-extended, into a 64-bit register
  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 32-bit word, sign-extended to 64 bits (LDRSW)
  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 64-bit doubleword
  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 32-bit float
  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 64-bit double
  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector loads: the extra MacroAssembler::S/D/Q argument selects the
  // SIMD access size (32, 64 or 128 bits respectively).
  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
3524 
  // Plain (non-volatile) store encodings.  The *0 variants store the
  // zero register (zr) directly, avoiding the need to materialize a
  // zero constant in a scratch register.

  // store byte
  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store zero byte
  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store zero byte preceded by a StoreStore barrier, so that the
  // store cannot be reordered with earlier stores
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store halfword
  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store zero halfword
  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store 32-bit word
  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store zero 32-bit word
  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store 64-bit doubleword
  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (sp is not a valid source for str), so copy sp into
    // rscratch2 and store that instead
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store zero doubleword
  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store 32-bit float
  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store 64-bit double
  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector stores: MacroAssembler::S/D/Q selects the SIMD access size
  // (32, 64 or 128 bits respectively).
  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
3617 
3618   // END Non-volatile memory access
3619 
3620   // volatile loads and stores
3621 
  // Store-release encodings for byte, halfword and word.  MOV_VOLATILE
  // is a helper macro defined earlier in this file; it is handed the
  // decomposed memory operand, a scratch register (rscratch1) for
  // address formation, and the name of the release-store instruction
  // to emit.

  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}
3636 
3637 
  // Load-acquire encodings.  AArch64 has no sign-extending acquiring
  // loads, so the signed variants do a zero-extending LDARB/LDARH and
  // then sign-extend the result in place with SXTB/SXTH.

  // load-acquire signed byte into a 32-bit register
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // load-acquire signed byte, sign-extended to 64 bits
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  // load-acquire byte, zero-extended
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // load-acquire signed halfword into a 32-bit register
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // load-acquire signed halfword, sign-extended to 64 bits
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // load-acquire halfword, zero-extended
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // load-acquire 32-bit word
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // load-acquire 64-bit doubleword
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // FP load-acquire: there is no acquiring FP load, so do an integer
  // LDARW/LDAR into rscratch1 and move the bits across with FMOV
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
3712 
  // store-release 64-bit doubleword
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (sp is not a valid source for stlr), so copy sp into
    // rscratch2 and store that instead
    if (src_reg == r31_sp) {
        MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // FP store-release: there is no releasing FP store, so move the bits
  // into rscratch2 with FMOV and emit an integer STLRW/STLR.  The
  // inner braces scope the local _masm so that the MOV_VOLATILE
  // expansion can declare its own.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
3746 
3747   // synchronized read/update encodings
3748 
  // Load-acquire-exclusive of a 64-bit doubleword.  LDAXR only takes a
  // base register, so any index/displacement is first folded into
  // rscratch1 with LEA and the exclusive load is done from there.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // both a displacement and a scaled index: fold them into
        // rscratch1 in two LEA steps
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
3777 
  // Store-release-exclusive of a 64-bit doubleword.  As with ldaxr,
  // any index/displacement is folded into a scratch register
  // (rscratch2 here, since rscratch1 receives the STLXR status).  The
  // trailing CMPW sets the flags from the status word: STLXR writes 0
  // on success, so EQ afterwards means the store succeeded.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    __ cmpw(rscratch1, zr);
  %}
3807 
  // 64-bit compare-and-swap via an LDXR/STLXR retry loop.  The address
  // is first folded into addr_reg (rscratch2 or base directly); then:
  //   retry: load-exclusive current value, compare with oldval;
  //   on mismatch fall out with NE set; otherwise attempt the
  //   store-release-exclusive and retry if the reservation was lost.
  // Flags at 'done': EQ iff the swap succeeded (the aarch64_enc_cset_eq
  // encoding below relies on this).
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    Register old_reg = as_Register($oldval$$reg);
    Register new_reg = as_Register($newval$$reg);
    Register base = as_Register($mem$$base);
    Register addr_reg;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        addr_reg = rscratch2;
      } else {
        // TODO
        // should we ever get anything other than this case?
        addr_reg = base;
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        addr_reg = rscratch2;
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        addr_reg = rscratch2;
      }
    }
    Label retry_load, done;
    __ bind(retry_load);
    __ ldxr(rscratch1, addr_reg);
    __ cmp(rscratch1, old_reg);
    __ br(Assembler::NE, done);
    __ stlxr(rscratch1, new_reg, addr_reg);
    __ cbnzw(rscratch1, retry_load);  // nonzero status => reservation lost, retry
    __ bind(done);
  %}
3846 
  // 32-bit compare-and-swap; identical structure to aarch64_enc_cmpxchg
  // above but using the word-sized LDXRW/CMPW/STLXRW.  Flags at 'done':
  // EQ iff the swap succeeded.
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    Register old_reg = as_Register($oldval$$reg);
    Register new_reg = as_Register($newval$$reg);
    Register base = as_Register($mem$$base);
    Register addr_reg;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        addr_reg = rscratch2;
      } else {
        // TODO
        // should we ever get anything other than this case?
        addr_reg = base;
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        addr_reg = rscratch2;
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        addr_reg = rscratch2;
      }
    }
    Label retry_load, done;
    __ bind(retry_load);
    __ ldxrw(rscratch1, addr_reg);
    __ cmpw(rscratch1, old_reg);
    __ br(Assembler::NE, done);
    __ stlxrw(rscratch1, new_reg, addr_reg);
    __ cbnzw(rscratch1, retry_load);  // nonzero status => reservation lost, retry
    __ bind(done);
  %}
3885 
3886   // auxiliary used for CompareAndSwapX to set result register
  // auxiliary used for CompareAndSwapX to set result register
  // Materializes the flags left by a preceding cmpxchg encoding:
  // res = 1 if EQ (swap succeeded), else 0.
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}
3892 
3893   // prefetch encodings
3894 
  // Prefetch for write: emits PRFM with the PSTL1KEEP hint at the
  // operand's effective address.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
      // NOTE(review): the nop appears to pad this path to the same
      // size as the indexed path -- TODO confirm
      __ nop();
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        // fold the displacement into rscratch1 first, then prefetch
        // with the scaled index
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
3914 
  // Zero a word-aligned region of cnt words starting at base, using an
  // 8-way unrolled store loop entered Duff's-device style: a computed
  // branch lands part-way into the unrolled str(zr) sequence to handle
  // the cnt % 8 remainder, then the loop clears 8 words per iteration.
  // Clobbers rscratch1, rscratch2, and advances base_reg past the
  // region; cnt_reg is counted down to zero.
  enc_class aarch64_enc_clear_array_reg_reg(iRegL_R11 cnt, iRegP_R10 base) %{
    MacroAssembler _masm(&cbuf);
    Register cnt_reg = as_Register($cnt$$reg);
    Register base_reg = as_Register($base$$reg);
    // base is word aligned
    // cnt is count of words

    Label loop;
    Label entry;

//  Algorithm:
//
//    scratch1 = cnt & 7;
//    cnt -= scratch1;
//    p += scratch1;
//    switch (scratch1) {
//      do {
//        cnt -= 8;
//          p[-8] = 0;
//        case 7:
//          p[-7] = 0;
//        case 6:
//          p[-6] = 0;
//          // ...
//        case 1:
//          p[-1] = 0;
//        case 0:
//          p += 8;
//      } while (cnt);
//    }

    const int unroll = 8; // Number of str(zr) instructions we'll unroll

    __ andr(rscratch1, cnt_reg, unroll - 1);  // tmp1 = cnt % unroll
    __ sub(cnt_reg, cnt_reg, rscratch1);      // cnt -= (cnt % unroll)
    // base_reg always points to the end of the region we're about to zero
    __ add(base_reg, base_reg, rscratch1, Assembler::LSL, exact_log2(wordSize));
    // computed branch into the unrolled stores: each str is 4 bytes,
    // hence the index is scaled by LSL 2
    __ adr(rscratch2, entry);
    __ sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 2);
    __ br(rscratch2);
    __ bind(loop);
    __ sub(cnt_reg, cnt_reg, unroll);
    for (int i = -unroll; i < 0; i++)
      __ str(zr, Address(base_reg, i * wordSize));
    __ bind(entry);
    __ add(base_reg, base_reg, unroll * wordSize);
    __ cbnz(cnt_reg, loop);
  %}
3963 
  /// mov encodings
3965 
  // move a 32-bit immediate into an integer register; zero is moved
  // from zr rather than materialized as a constant
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    MacroAssembler _masm(&cbuf);
    u_int32_t con = (u_int32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // move a 64-bit immediate into a long register; zero is moved
  // from zr rather than materialized as a constant
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    u_int64_t con = (u_int64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}
3987 
  // move a pointer constant into a register, dispatching on its
  // relocation type: oops and metadata get relocatable moves, plain
  // addresses use mov for small values or ADRP+ADD otherwise.  NULL
  // and the special value 1 are handled by the dedicated mov_p0 /
  // mov_p1 encodings below and must not reach here.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        // addresses inside the first page cannot be reached by adrp
        // (they would be mistaken for null-ish values); use a plain mov
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
4012 
  // move the NULL pointer constant (0) into a register
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // move the special pointer constant 1 into a register
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (u_int64_t)1);
  %}

  // load the address of the polling page with a poll_type relocation;
  // the page must be 4K-aligned so the ADRP low offset is zero
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}

  // load the card table byte-map base address; likewise assumed to be
  // page-aligned so ADRP alone suffices
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, ExternalAddress(page), off);
    assert(off == 0, "assumed offset == 0");
  %}
4042 
  // move a narrow (compressed) oop constant into a register; NULL is
  // handled by mov_n0 below and must not reach here
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // move the narrow-oop NULL constant (0) into a register
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // move a narrow (compressed) klass constant into a register
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
4074 
4075   // arithmetic encodings
4076 
  // 32-bit add/subtract with immediate.  A single encoding serves both
  // operations: the instruct rule's primary opcode selects subtract,
  // which is implemented by negating the constant.  A resulting
  // negative constant is then emitted as the opposite operation on its
  // absolute value, since add/sub immediates are unsigned.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // 64-bit add/subtract with immediate; same primary-opcode and
  // constant-negation scheme as the 32-bit variant above
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
4104 
  // Integer division and remainder.  corrected_idivl/corrected_idivq
  // handle the Java-specified corner cases (divide by zero, MIN_VALUE
  // / -1); the boolean argument selects remainder (true, mod) versus
  // quotient (false, div), and rscratch1 is passed as a temp.

  // 32-bit divide
  enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 64-bit divide
  enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 32-bit remainder
  enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}

  // 64-bit remainder
  enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}
4136 
4137   // compare instruction encodings
4138 
  // Compare encodings.  All of these only set the condition flags; the
  // result register of a compare is the flags register.

  // 32-bit register-register compare
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare against an add/sub-range immediate; negative values
  // are handled by comparing with ADDS on the negated constant
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare against an arbitrary immediate: the constant is
  // first materialized in rscratch1 (for values outside the add/sub
  // immediate range handled above)
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int32_t val = (u_int32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  // 64-bit register-register compare
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare against a 12-bit add/sub immediate
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case: it equals its own
    // negation, so materialize it in rscratch1 and compare with that
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare against an arbitrary immediate, materialized in
  // rscratch1 first
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int64_t val = (u_int64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}

  // pointer compare (64-bit)
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // narrow-oop compare (32-bit)
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // test pointer against NULL
  enc_class aarch64_enc_testp(iRegP src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // test narrow oop against NULL
  enc_class aarch64_enc_testn(iRegN src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
4220 
4221   enc_class aarch64_enc_b(label lbl) %{
4222     MacroAssembler _masm(&cbuf);
4223     Label *L = $lbl$$label;
4224     __ b(*L);
4225   %}
4226 
4227   enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
4228     MacroAssembler _masm(&cbuf);
4229     Label *L = $lbl$$label;
4230     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4231   %}
4232 
4233   enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
4234     MacroAssembler _masm(&cbuf);
4235     Label *L = $lbl$$label;
4236     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4237   %}
4238 
  // Slow-path subtype check (scan of the secondary supers).
  // Falls through on success, branches to miss on failure; when
  // $primary is set, result is cleared on the success path.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
4256 
4257   enc_class aarch64_enc_java_static_call(method meth) %{
4258     MacroAssembler _masm(&cbuf);
4259 
4260     address addr = (address)$meth$$method;
4261     address call;
4262     if (!_method) {
4263       // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
4264       call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
4265     } else if (_optimized_virtual) {
4266       call = __ trampoline_call(Address(addr, relocInfo::opt_virtual_call_type), &cbuf);
4267     } else {
4268       call = __ trampoline_call(Address(addr, relocInfo::static_call_type), &cbuf);
4269     }
4270     if (call == NULL) {
4271       ciEnv::current()->record_failure("CodeCache is full"); 
4272       return;
4273     }
4274 
4275     if (_method) {
4276       // Emit stub for static call
4277       address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
4278       if (stub == NULL) {
4279         ciEnv::current()->record_failure("CodeCache is full"); 
4280         return;
4281       }
4282     }
4283   %}
4284 
4285   enc_class aarch64_enc_java_dynamic_call(method meth) %{
4286     MacroAssembler _masm(&cbuf);
4287     address call = __ ic_call((address)$meth$$method);
4288     if (call == NULL) {
4289       ciEnv::current()->record_failure("CodeCache is full"); 
4290       return;
4291     }
4292   %}
4293 
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      // (not implemented on AArch64 yet -- traps if the flag is set).
      __ call_Unimplemented();
    }
  %}
4301 
4302   enc_class aarch64_enc_java_to_runtime(method meth) %{
4303     MacroAssembler _masm(&cbuf);
4304 
4305     // some calls to generated routines (arraycopy code) are scheduled
4306     // by C2 as runtime calls. if so we can call them using a br (they
4307     // will be in a reachable segment) otherwise we have to use a blrt
4308     // which loads the absolute address into a register.
4309     address entry = (address)$meth$$method;
4310     CodeBlob *cb = CodeCache::find_blob(entry);
4311     if (cb) {
4312       address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
4313       if (call == NULL) {
4314         ciEnv::current()->record_failure("CodeCache is full"); 
4315         return;
4316       }
4317     } else {
4318       int gpcnt;
4319       int fpcnt;
4320       int rtype;
4321       getCallInfo(tf(), gpcnt, fpcnt, rtype);
4322       Label retaddr;
4323       __ adr(rscratch2, retaddr);
4324       __ lea(rscratch1, RuntimeAddress(entry));
4325       // Leave a breadcrumb for JavaThread::pd_last_frame().
4326       __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
4327       __ blrt(rscratch1, gpcnt, fpcnt, rtype);
4328       __ bind(retaddr);
4329       __ add(sp, sp, 2 * wordSize);
4330     }
4331   %}
4332 
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    // Tail-jump to the shared rethrow stub (may be out of branch range,
    // hence far_jump).
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
4337 
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    // Return to the address held in the link register.
    __ ret(lr);
  %}
4342 
4343   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
4344     MacroAssembler _masm(&cbuf);
4345     Register target_reg = as_Register($jump_target$$reg);
4346     __ br(target_reg);
4347   %}
4348 
4349   enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
4350     MacroAssembler _masm(&cbuf);
4351     Register target_reg = as_Register($jump_target$$reg);
4352     // exception oop should be in r0
4353     // ret addr has been popped into lr
4354     // callee expects it in r3
4355     __ mov(r3, lr);
4356     __ br(target_reg);
4357   %}
4358 
  // Fast-path monitor enter.  On exit the condition flags hold the
  // result: EQ => lock acquired, NE => caller must take the slow path
  // into the runtime.  box points at the on-stack BasicLock; tmp/tmp2
  // are scratch.
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      // oop can't be null here, so this sets NE and forces the slow path.
      __ cmp(oop, zr);
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      // On a successful bias acquisition this branches to cont with EQ set.
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Handle existing monitor
    if ((EmitSync & 0x02) == 0) {
      // we can use AArch64's bit test and branch here but
      // markOopDesc does not define a bit index just the bit value
      // so assert in case the bit pos changes
#     define __monitor_value_log2 1
      assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
      __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#     undef __monitor_value_log2
    }

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // Load Compare Value application register.

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with mark and if equal exchange scratch1
    // with object markOop.
    // Note that this is simply a CAS: it does not generate any
    // barriers.  These are separately generated by
    // membar_acquire_lock().
    {
      Label retry_load;
      __ bind(retry_load);
      __ ldxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, box, oop);
      // stlxr wrote its 32-bit status into tmp: 0 == store succeeded.
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, will have now locked it will continue at label cont

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      // The object's monitor m is unlocked iff m->owner == NULL,
      // otherwise m->owner may contain a thread or a stack address.
      //
      // Try to CAS m->owner from NULL to current thread.
      __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
      __ mov(disp_hdr, zr);

      {
        Label retry_load, fail;
        __ bind(retry_load);
        __ ldxr(rscratch1, tmp);
        __ cmp(disp_hdr, rscratch1);
        // Owner is non-NULL: leave NE set so the slow path is taken.
        __ br(Assembler::NE, fail);
        // use stlxr to ensure update is immediately visible
        __ stlxr(rscratch1, rthread, tmp);
        __ cbnzw(rscratch1, retry_load);
        __ bind(fail);
      }

      // Label next;
      // __ cmpxchgptr(/*oldv=*/disp_hdr,
      //               /*newv=*/rthread,
      //               /*addr=*/tmp,
      //               /*tmp=*/rscratch1,
      //               /*succeed*/next,
      //               /*fail*/NULL);
      // __ bind(next);

      // store a non-null value into the box.
      __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

      // PPC port checks the following invariants
      // #ifdef ASSERT
      // bne(flag, cont);
      // We have acquired the monitor, check some invariants.
      // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
      // Invariant 1: _recursions should be 0.
      // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
      // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
      //                        "monitor->_recursions should be 0", -1);
      // Invariant 2: OwnerIsThread shouldn't be 0.
      // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
      //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
      //                           "monitor->OwnerIsThread shouldn't be 0", -1);
      // #endif
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
4503 
  // TODO
  // reimplement this with custom cmpxchgptr code
  // which avoids some of the unnecessary branching

  // Fast-path monitor exit.  On exit the condition flags hold the
  // result: EQ => unlocked, NE => caller must take the slow path.
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      __ cmp(oop, zr); // Oop can't be 0 here => always false.
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      // Current mark goes into tmp (used below at object_has_monitor);
      // the monitor bit is tested in the saved displaced header.
      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
    }

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      {
        Label retry_load;
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        // stlxr status: 0 == store succeeded.
        __ cbzw(tmp, cont);
        __ b(retry_load);
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
      __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
      __ cmp(rscratch1, zr);
      __ br(Assembler::NE, cont);

      __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
      // cmp leaves NE set when waiters exist, so if the cbnz below is
      // taken the slow path handles the wakeup.
      __ cmp(rscratch1, zr);
      __ cbnz(rscratch1, cont);
      // need a release store here
      __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ stlr(rscratch1, tmp); // rscratch1 is zero
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
4596 
4597 %}
4598 
4599 //----------FRAME--------------------------------------------------------------
4600 // Definition of frame structure and management information.
4601 //
4602 //  S T A C K   L A Y O U T    Allocators stack-slot number
4603 //                             |   (to get allocators register number
4604 //  G  Owned by    |        |  v    add OptoReg::stack0())
4605 //  r   CALLER     |        |
4606 //  o     |        +--------+      pad to even-align allocators stack-slot
4607 //  w     V        |  pad0  |        numbers; owned by CALLER
4608 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
4609 //  h     ^        |   in   |  5
4610 //        |        |  args  |  4   Holes in incoming args owned by SELF
4611 //  |     |        |        |  3
4612 //  |     |        +--------+
4613 //  V     |        | old out|      Empty on Intel, window on Sparc
4614 //        |    old |preserve|      Must be even aligned.
4615 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
4616 //        |        |   in   |  3   area for Intel ret address
4617 //     Owned by    |preserve|      Empty on Sparc.
4618 //       SELF      +--------+
4619 //        |        |  pad2  |  2   pad to align old SP
4620 //        |        +--------+  1
4621 //        |        | locks  |  0
4622 //        |        +--------+----> OptoReg::stack0(), even aligned
4623 //        |        |  pad1  | 11   pad to align new SP
4624 //        |        +--------+
4625 //        |        |        | 10
4626 //        |        | spills |  9   spills
4627 //        V        |        |  8   (pad0 slot for callee)
4628 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
4629 //        ^        |  out   |  7
4630 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
4631 //     Owned by    +--------+
4632 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
4633 //        |    new |preserve|      Must be even-aligned.
4634 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
4635 //        |        |        |
4636 //
4637 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
4638 //         known from SELF's arguments and the Java calling convention.
4639 //         Region 6-7 is determined per call site.
// Note 2: If the calling convention leaves holes in the incoming argument
//         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
//         incoming area, as the Java calling convention is completely under
//         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
//         varargs C calling conventions.
4647 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
4648 //         even aligned with pad0 as needed.
4649 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
4650 //           (the latter is true on Intel but is it false on AArch64?)
4651 //         region 6-11 is even aligned; it may be padded out more so that
4652 //         the region from SP to FP meets the minimum stack alignment.
4653 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
4654 //         alignment.  Region 11, pad1, may be dynamically extended so that
4655 //         SP meets the minimum alignment.
4656 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  // NOTE(review): R31 denotes the stack pointer here -- confirm against
  // the register definitions at the top of this file.
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              round_to((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}
4727 
4728   // Location of compiled Java return values.  Same as C for now.
4729   return_value
4730   %{
4731     // TODO do we allow ideal_reg == Op_RegN???
4732     assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
4733            "only return normal values");
4734 
4735     static const int lo[Op_RegL + 1] = { // enum name
4736       0,                                 // Op_Node
4737       0,                                 // Op_Set
4738       R0_num,                            // Op_RegN
4739       R0_num,                            // Op_RegI
4740       R0_num,                            // Op_RegP
4741       V0_num,                            // Op_RegF
4742       V0_num,                            // Op_RegD
4743       R0_num                             // Op_RegL
4744     };
4745 
4746     static const int hi[Op_RegL + 1] = { // enum name
4747       0,                                 // Op_Node
4748       0,                                 // Op_Set
4749       OptoReg::Bad,                       // Op_RegN
4750       OptoReg::Bad,                      // Op_RegI
4751       R0_H_num,                          // Op_RegP
4752       OptoReg::Bad,                      // Op_RegF
4753       V0_H_num,                          // Op_RegD
4754       R0_H_num                           // Op_RegL
4755     };
4756 
4757     return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
4758   %}
4759 %}
4760 
4761 //----------ATTRIBUTES---------------------------------------------------------
4762 //----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
// These attribute declarations are required by the ADLC; the values
// given here are the defaults applied to every instruct definition.
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
4778 
4779 //----------OPERANDS-----------------------------------------------------------
4780 // Operand definitions must precede instruction definitions for correct parsing
4781 // in the ADLC because operands constitute user defined types which are used in
4782 // instruction definitions.
4783 
4784 //----------Simple Operands----------------------------------------------------
4785 
// Integer operands 32 bit
// 32 bit immediate -- any 32-bit constant (no predicate restriction)
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4807 
// 32 bit unit increment (the constant 1)
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement (the constant -1)
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4829 
// Any 32-bit constant <= 4.
// NOTE(review): the predicate also admits negative values -- verify that
// the matching rules using this operand only see non-negative inputs.
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The constant 31 (e.g. a 32-bit shift-amount mask bound).
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4849 
// Specific byte-multiple bit-position constants (8..64); presumably used
// as shift amounts / bitfield positions in matching rules -- see the
// instruct definitions that reference them.
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4919 
// The constant 255 (0xff, a byte mask).
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The constant 65535 (0xffff, a halfword mask).
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4939 
// NOTE(review): despite the immL_ names, these two operands match ConI
// nodes via get_int(), not ConL -- presumably because 64-bit shift counts
// and masks appear as int constants in the ideal graph.  Confirm against
// the rules using them before "fixing" to ConL/get_long().
operand immL_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immL_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4959 
// 64-bit constant 65535 (0xffff, halfword mask).
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64-bit constant 4294967295 (0xffffffff, low-word mask).
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4979 
// 64-bit low-order bitmask: a value of the form 2^k - 1 whose top two
// bits are clear (i.e. contiguous ones starting at bit 0).
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32-bit low-order bitmask: 2^k - 1 with the top two bits clear.
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5001 
// Scale values for scaled offset addressing modes (up to long but not quad)
// i.e. shift amounts 0..3 (byte, halfword, word, doubleword element size).
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5012 
// 26 bit signed offset -- for pc-relative branches (B/BL range)
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads (LDR literal range)
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5034 
// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset, long-typed variant of immIU12.
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5055 
// Offset for scaled or unscaled immediate loads and stores
// (delegates encodability to Address::offset_ok_for_immed).
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long-typed variant of immIOffset.
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5076 
// 32 bit integer valid for add sub immediate
// (encodability decided by the assembler, including negated forms).
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5098 
// Integer operands 64 bit
// 64 bit immediate -- any long constant (no predicate restriction)
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5120 
5121 // 64 bit unit increment
5122 operand immL_1()
5123 %{
5124   predicate(n->get_long() == 1);
5125   match(ConL);
5126 
5127   op_cost(0);
5128   format %{ %}
5129   interface(CONST_INTER);
5130 %}
5131 
5132 // 64 bit unit decrement
5133 operand immL_M1()
5134 %{
5135   predicate(n->get_long() == -1);
5136   match(ConL);
5137 
5138   op_cost(0);
5139   format %{ %}
5140   interface(CONST_INTER);
5141 %}
5142 
5143 // 32 bit offset of pc in thread anchor
5144 
5145 operand immL_pc_off()
5146 %{
5147   predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
5148                              in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
5149   match(ConL);
5150 
5151   op_cost(0);
5152   format %{ %}
5153   interface(CONST_INTER);
5154 %}
5155 
// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
// (encodable as an AArch64 bitmask immediate, 64-bit variant)
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5185 
// Pointer operands
// Pointer Immediate -- any pointer constant
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate -- matches only the address of the
// VM's polling page so safepoint polls can use a cheap encoding
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base -- matches only the card table base so GC
// barrier code can materialize it specially
operand immByteMapBase()
%{
  // Get base of card map
  predicate((jbyte*)n->get_ptr() ==
        ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5267 
// Float and Double operands
// Double Immediate -- any double constant
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d (bit pattern compare, so -0.0d does not match)
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: encodable as an FP immediate operand
// (validity delegated to Assembler::operand_valid_for_float_immediate)
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate -- any float constant
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f (bit pattern compare, so -0.0f does not match)
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: encodable as an FP immediate operand
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5328 
// Narrow pointer operands
// Narrow Pointer Immediate -- any compressed oop constant
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate -- any compressed klass constant
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5359 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5393 
// Integer 64 bit Register not Special
// Fix: add op_cost(0) for consistency with the sibling iRegINoSp and
// iRegPNoSp operands; without it ADLC applies the default operand cost,
// making long no-special registers cost differently from int/ptr ones.
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5402 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5435 
// Fixed-register pointer operands below pin a value to one specific
// register (used where the ABI or runtime stubs require it).

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5519 
// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only (the frame pointer)
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5541 
// Integer 32 bit Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Integer 32 bit Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

5587 
// Narrow Pointer Register Operands
// Narrow Pointer Register (compressed oop in a 32 bit register)
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5620 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64 bit vector register operand
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128 bit vector register operand
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5664 
// Double register pinned to V0 (used by runtime stub calling conventions)
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register pinned to V1
operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register pinned to V2
operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register pinned to V3
operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5700 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
5740 
// Special Registers

// Method Register -- pins the inline cache pointer to the method register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Interpreter method oop register
operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5763 
// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5782 
//----------Memory Operands----------------------------------------------------

// Simple register-indirect addressing: [reg]
// (index 0xffffffff means "no index register" to the matcher)
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}
5798 
// base + (long index << scale) + int offset
operand indIndexScaledOffsetI(iRegP reg, iRegL lreg, immIScale scale, immIU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// base + (long index << scale) + long offset
operand indIndexScaledOffsetL(iRegP reg, iRegL lreg, immIScale scale, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// base + sign-extended int index + long offset
operand indIndexOffsetI2L(iRegP reg, iRegI ireg, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (ConvI2L ireg)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg, $off I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp($off);
  %}
%}

// base + (sign-extended int index << scale) + long offset
operand indIndexScaledOffsetI2L(iRegP reg, iRegI ireg, immIScale scale, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg sxtw($scale), $off I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}
5854 
// base + (sign-extended int index << scale), no offset
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// base + (long index << scale), no offset
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// base + long index, no scale, no offset
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
5896 
// base + int immediate offset: [reg, #off]
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// base + long immediate offset: [reg, #off]
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5924 
5925 
// Narrow-oop variants of the memory operands above. They only apply
// when narrow_oop_shift() == 0, i.e. the compressed oop IS the raw
// address, so the DecodeN can be folded into the addressing mode.

operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// narrow base + (long index << scale) + int offset
// NOTE(review): op_cost(0) here vs INSN_COST on the non-narrow
// indIndexScaledOffsetI and on indIndexScaledOffsetLN below -- looks
// inconsistent; confirm intended costs before changing.
operand indIndexScaledOffsetIN(iRegN reg, iRegL lreg, immIScale scale, immIU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// narrow base + (long index << scale) + long offset
operand indIndexScaledOffsetLN(iRegN reg, iRegL lreg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// narrow base + sign-extended int index + long offset
operand indIndexOffsetI2LN(iRegN reg, iRegI ireg, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (ConvI2L ireg)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg, $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp($off);
  %}
%}

// narrow base + (sign-extended int index << scale) + long offset
operand indIndexScaledOffsetI2LN(iRegN reg, iRegI ireg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg sxtw($scale), $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}
6000 
// narrow base + (sign-extended int index << scale), no offset
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// narrow base + (long index << scale), no offset
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// narrow base + long index, no scale, no offset
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// narrow base + int immediate offset
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// narrow base + long immediate offset
operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6075 
6076 
6077 
// AArch64 opto stubs need to write to the pc slot in the thread anchor;
// this matches thread-register + the exact last_Java_pc byte offset
// (see immL_pc_off above).
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6092 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
// NOTE(review): only stackSlotP sets op_cost(100); the I/F/D/L variants
// below use the default cost -- confirm whether that is intentional.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a 32 bit int
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a float
operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a double
operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a 64 bit long
operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
6167 
6168 // Operands for expressing Control Flow
6169 // NOTE: Label is a predefined operand which should not be redefined in
6170 //       the AD file. It is generically handled within the ADLC.
6171 
6172 //----------Conditional Branch Operands----------------------------------------
6173 // Comparison Op  - This is the operation of the comparison, and is limited to
6174 //                  the following set of codes:
6175 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
6176 //
6177 // Other attributes of the comparison, such as unsignedness, are specified
6178 // by the comparison instruction that sets a condition code flags register.
6179 // That result is represented by a flags operand whose subtype is appropriate
6180 // to the unsignedness (etc.) of the comparison.
6181 //
6182 // Later, the instruction which matches both the Comparison Op (a Bool) and
6183 // the flags (produced by the Cmp) specifies the coding of the comparison op
6184 // by matching a specific subtype of Bool operand below, such as cmpOpU.
6185 
// used for signed integral comparisons and fp comparisons
// (encodings are AArch64 condition codes: eq/ne/lt/ge/le/gt/vs/vc)

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons
// (encodings are AArch64 unsigned condition codes: lo/hs/ls/hi)

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6223 
// Special operand allowing long args to int ops to be truncated for free
// Matches a ConvL2I of a long register so 32-bit instructions can read
// the low half of the long directly without an explicit truncating move.
// Fix: add the terminating ';' after interface(REG_INTER) for
// consistency with every other operand definition in this file.
operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER);
%}
6236 
6237 opclass vmem(indirect, indIndex, indOffI, indOffL);
6238 
6239 //----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
6241 // instruction definitions by not requiring the AD writer to specify
6242 // separate instructions for every form of operand when the
6243 // instruction accepts multiple operand types with the same basic
6244 // encoding and format. The classic case of this is memory operands.
6245 
// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address
// (covers both the plain-pointer and narrow-oop addressing operands)

opclass memory(indirect, indIndexScaledOffsetI, indIndexScaledOffsetL, indIndexOffsetI2L, indIndexScaledOffsetI2L, indIndexScaled, indIndexScaledI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledOffsetIN, indIndexScaledOffsetLN, indIndexOffsetI2LN, indIndexScaledOffsetI2LN, indIndexScaledN, indIndexScaledI2LN, indIndexN, indOffIN, indOffLN);
6251 
6252 
// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but its not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
6267 
6268 //----------PIPELINE-----------------------------------------------------------
6269 // Rules which define the behavior of the target architectures pipeline.
6270 // Integer ALU reg operation
6271 pipeline %{
6272 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
6285 
6286 // We don't use an actual pipeline model so don't care about resources
6287 // or description. we do use pipeline classes to introduce fixed
6288 // latencies
6289 
6290 //----------RESOURCES----------------------------------------------------------
6291 // Resources are the functional units available to the machine
6292 
// Issue slots (INS0/INS1), two ALUs, multiply-accumulate, divide,
// branch, load/store and NEON/FP units; composite names (INS01, ALU)
// mean "either unit".
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);
6300 
6301 //----------PIPELINE DESCRIPTION-----------------------------------------------
6302 // Pipeline Description specifies the stages in the machine's pipeline
6303 
6304 pipe_desc(ISS, EX1, EX2, WR);
6305 
6306 //----------PIPELINE CLASSES---------------------------------------------------
6307 // Pipeline Classes describe the stages in which input and output are
6308 // referenced by the hardware pipeline.
6309 
6310 //------- Integer ALU operations --------------------------
6311 
6312 // Integer ALU reg-reg operation
6313 // Operands needed in EX1, result generated in EX2
6314 // Eg.  ADD     x0, x1, x2
6315 pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6316 %{
6317   single_instruction;
6318   dst    : EX2(write);
6319   src1   : EX1(read);
6320   src2   : EX1(read);
6321   INS01  : ISS; // Dual issue as instruction 0 or 1
6322   ALU    : EX2;
6323 %}
6324 
6325 // Integer ALU reg-reg operation with constant shift
6326 // Shifted register must be available in LATE_ISS instead of EX1
6327 // Eg.  ADD     x0, x1, x2, LSL #2
6328 pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
6329 %{
6330   single_instruction;
6331   dst    : EX2(write);
6332   src1   : EX1(read);
6333   src2   : ISS(read);
6334   INS01  : ISS;
6335   ALU    : EX2;
6336 %}
6337 
6338 // Integer ALU reg operation with constant shift
6339 // Eg.  LSL     x0, x1, #shift
6340 pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
6341 %{
6342   single_instruction;
6343   dst    : EX2(write);
6344   src1   : ISS(read);
6345   INS01  : ISS;
6346   ALU    : EX2;
6347 %}
6348 
6349 // Integer ALU reg-reg operation with variable shift
6350 // Both operands must be available in LATE_ISS instead of EX1
6351 // Result is available in EX1 instead of EX2
6352 // Eg.  LSLV    x0, x1, x2
6353 pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
6354 %{
6355   single_instruction;
6356   dst    : EX1(write);
6357   src1   : ISS(read);
6358   src2   : ISS(read);
6359   INS01  : ISS;
6360   ALU    : EX1;
6361 %}
6362 
6363 // Integer ALU reg-reg operation with extract
6364 // As for _vshift above, but result generated in EX2
6365 // Eg.  EXTR    x0, x1, x2, #N
6366 pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
6367 %{
6368   single_instruction;
6369   dst    : EX2(write);
6370   src1   : ISS(read);
6371   src2   : ISS(read);
6372   INS1   : ISS; // Can only dual issue as Instruction 1
6373   ALU    : EX1;
6374 %}
6375 
6376 // Integer ALU reg operation
6377 // Eg.  NEG     x0, x1
6378 pipe_class ialu_reg(iRegI dst, iRegI src)
6379 %{
6380   single_instruction;
6381   dst    : EX2(write);
6382   src    : EX1(read);
6383   INS01  : ISS;
6384   ALU    : EX2;
6385 %}
6386 
6387 // Integer ALU reg mmediate operation
6388 // Eg.  ADD     x0, x1, #N
6389 pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
6390 %{
6391   single_instruction;
6392   dst    : EX2(write);
6393   src1   : EX1(read);
6394   INS01  : ISS;
6395   ALU    : EX2;
6396 %}
6397 
6398 // Integer ALU immediate operation (no source operands)
6399 // Eg.  MOV     x0, #N
6400 pipe_class ialu_imm(iRegI dst)
6401 %{
6402   single_instruction;
6403   dst    : EX1(write);
6404   INS01  : ISS;
6405   ALU    : EX1;
6406 %}
6407 
6408 //------- Compare operation -------------------------------
6409 
6410 // Compare reg-reg
6411 // Eg.  CMP     x0, x1
6412 pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
6413 %{
6414   single_instruction;
6415 //  fixed_latency(16);
6416   cr     : EX2(write);
6417   op1    : EX1(read);
6418   op2    : EX1(read);
6419   INS01  : ISS;
6420   ALU    : EX2;
6421 %}
6422 
6423 // Compare reg-reg
6424 // Eg.  CMP     x0, #N
6425 pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
6426 %{
6427   single_instruction;
6428 //  fixed_latency(16);
6429   cr     : EX2(write);
6430   op1    : EX1(read);
6431   INS01  : ISS;
6432   ALU    : EX2;
6433 %}
6434 
6435 //------- Conditional instructions ------------------------
6436 
6437 // Conditional no operands
6438 // Eg.  CSINC   x0, zr, zr, <cond>
6439 pipe_class icond_none(iRegI dst, rFlagsReg cr)
6440 %{
6441   single_instruction;
6442   cr     : EX1(read);
6443   dst    : EX2(write);
6444   INS01  : ISS;
6445   ALU    : EX2;
6446 %}
6447 
6448 // Conditional 2 operand
6449 // EG.  CSEL    X0, X1, X2, <cond>
6450 pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
6451 %{
6452   single_instruction;
6453   cr     : EX1(read);
6454   src1   : EX1(read);
6455   src2   : EX1(read);
6456   dst    : EX2(write);
6457   INS01  : ISS;
6458   ALU    : EX2;
6459 %}
6460 
6461 // Conditional 2 operand
6462 // EG.  CSEL    X0, X1, X2, <cond>
6463 pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
6464 %{
6465   single_instruction;
6466   cr     : EX1(read);
6467   src    : EX1(read);
6468   dst    : EX2(write);
6469   INS01  : ISS;
6470   ALU    : EX2;
6471 %}
6472 
6473 //------- Multiply pipeline operations --------------------
6474 
6475 // Multiply reg-reg
6476 // Eg.  MUL     w0, w1, w2
6477 pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6478 %{
6479   single_instruction;
6480   dst    : WR(write);
6481   src1   : ISS(read);
6482   src2   : ISS(read);
6483   INS01  : ISS;
6484   MAC    : WR;
6485 %}
6486 
6487 // Multiply accumulate
6488 // Eg.  MADD    w0, w1, w2, w3
6489 pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
6490 %{
6491   single_instruction;
6492   dst    : WR(write);
6493   src1   : ISS(read);
6494   src2   : ISS(read);
6495   src3   : ISS(read);
6496   INS01  : ISS;
6497   MAC    : WR;
6498 %}
6499 
6500 // Eg.  MUL     w0, w1, w2
6501 pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6502 %{
6503   single_instruction;
6504   fixed_latency(3); // Maximum latency for 64 bit mul
6505   dst    : WR(write);
6506   src1   : ISS(read);
6507   src2   : ISS(read);
6508   INS01  : ISS;
6509   MAC    : WR;
6510 %}
6511 
6512 // Multiply accumulate
6513 // Eg.  MADD    w0, w1, w2, w3
6514 pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
6515 %{
6516   single_instruction;
6517   fixed_latency(3); // Maximum latency for 64 bit mul
6518   dst    : WR(write);
6519   src1   : ISS(read);
6520   src2   : ISS(read);
6521   src3   : ISS(read);
6522   INS01  : ISS;
6523   MAC    : WR;
6524 %}
6525 
6526 //------- Divide pipeline operations --------------------
6527 
6528 // Eg.  SDIV    w0, w1, w2
6529 pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6530 %{
6531   single_instruction;
6532   fixed_latency(8); // Maximum latency for 32 bit divide
6533   dst    : WR(write);
6534   src1   : ISS(read);
6535   src2   : ISS(read);
6536   INS0   : ISS; // Can only dual issue as instruction 0
6537   DIV    : WR;
6538 %}
6539 
6540 // Eg.  SDIV    x0, x1, x2
6541 pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6542 %{
6543   single_instruction;
6544   fixed_latency(16); // Maximum latency for 64 bit divide
6545   dst    : WR(write);
6546   src1   : ISS(read);
6547   src2   : ISS(read);
6548   INS0   : ISS; // Can only dual issue as instruction 0
6549   DIV    : WR;
6550 %}
6551 
6552 //------- Load pipeline operations ------------------------
6553 
6554 // Load - prefetch
6555 // Eg.  PFRM    <mem>
6556 pipe_class iload_prefetch(memory mem)
6557 %{
6558   single_instruction;
6559   mem    : ISS(read);
6560   INS01  : ISS;
6561   LDST   : WR;
6562 %}
6563 
6564 // Load - reg, mem
6565 // Eg.  LDR     x0, <mem>
6566 pipe_class iload_reg_mem(iRegI dst, memory mem)
6567 %{
6568   single_instruction;
6569   dst    : WR(write);
6570   mem    : ISS(read);
6571   INS01  : ISS;
6572   LDST   : WR;
6573 %}
6574 
6575 // Load - reg, reg
6576 // Eg.  LDR     x0, [sp, x1]
6577 pipe_class iload_reg_reg(iRegI dst, iRegI src)
6578 %{
6579   single_instruction;
6580   dst    : WR(write);
6581   src    : ISS(read);
6582   INS01  : ISS;
6583   LDST   : WR;
6584 %}
6585 
6586 //------- Store pipeline operations -----------------------
6587 
6588 // Store - zr, mem
6589 // Eg.  STR     zr, <mem>
6590 pipe_class istore_mem(memory mem)
6591 %{
6592   single_instruction;
6593   mem    : ISS(read);
6594   INS01  : ISS;
6595   LDST   : WR;
6596 %}
6597 
6598 // Store - reg, mem
6599 // Eg.  STR     x0, <mem>
6600 pipe_class istore_reg_mem(iRegI src, memory mem)
6601 %{
6602   single_instruction;
6603   mem    : ISS(read);
6604   src    : EX2(read);
6605   INS01  : ISS;
6606   LDST   : WR;
6607 %}
6608 
6609 // Store - reg, reg
6610 // Eg. STR      x0, [sp, x1]
6611 pipe_class istore_reg_reg(iRegI dst, iRegI src)
6612 %{
6613   single_instruction;
6614   dst    : ISS(read);
6615   src    : EX2(read);
6616   INS01  : ISS;
6617   LDST   : WR;
6618 %}
6619 
6620 //------- Store pipeline operations -----------------------
6621 
6622 // Branch
6623 pipe_class pipe_branch()
6624 %{
6625   single_instruction;
6626   INS01  : ISS;
6627   BRANCH : EX1;
6628 %}
6629 
6630 // Conditional branch
6631 pipe_class pipe_branch_cond(rFlagsReg cr)
6632 %{
6633   single_instruction;
6634   cr     : EX1(read);
6635   INS01  : ISS;
6636   BRANCH : EX1;
6637 %}
6638 
6639 // Compare & Branch
6640 // EG.  CBZ/CBNZ
6641 pipe_class pipe_cmp_branch(iRegI op1)
6642 %{
6643   single_instruction;
6644   op1    : EX1(read);
6645   INS01  : ISS;
6646   BRANCH : EX1;
6647 %}
6648 
6649 //------- Synchronisation operations ----------------------
6650 
6651 // Any operation requiring serialization.
6652 // EG.  DMB/Atomic Ops/Load Acquire/Str Release
6653 pipe_class pipe_serial()
6654 %{
6655   single_instruction;
6656   force_serialization;
6657   fixed_latency(16);
6658   INS01  : ISS(2); // Cannot dual issue with any other instruction
6659   LDST   : WR;
6660 %}
6661 
6662 // Generic big/slow expanded idiom - also serialized
6663 pipe_class pipe_slow()
6664 %{
6665   instruction_count(10);
6666   multiple_bundles;
6667   force_serialization;
6668   fixed_latency(16);
6669   INS01  : ISS(2); // Cannot dual issue with any other instruction
6670   LDST   : WR;
6671 %}
6672 
6673 // Empty pipeline class
6674 pipe_class pipe_class_empty()
6675 %{
6676   single_instruction;
6677   fixed_latency(0);
6678 %}
6679 
6680 // Default pipeline class.
6681 pipe_class pipe_class_default()
6682 %{
6683   single_instruction;
6684   fixed_latency(2);
6685 %}
6686 
6687 // Pipeline class for compares.
6688 pipe_class pipe_class_compare()
6689 %{
6690   single_instruction;
6691   fixed_latency(16);
6692 %}
6693 
6694 // Pipeline class for memory operations.
6695 pipe_class pipe_class_memory()
6696 %{
6697   single_instruction;
6698   fixed_latency(16);
6699 %}
6700 
6701 // Pipeline class for call.
6702 pipe_class pipe_class_call()
6703 %{
6704   single_instruction;
6705   fixed_latency(100);
6706 %}
6707 
6708 // Define the class for the Nop node.
6709 define %{
6710    MachNop = pipe_class_empty;
6711 %}
6712 
6713 %}
6714 //----------INSTRUCTIONS-------------------------------------------------------
6715 //
6716 // match      -- States which machine-independent subtree may be replaced
6717 //               by this instruction.
6718 // ins_cost   -- The estimated cost of this instruction is used by instruction
6719 //               selection to identify a minimum cost tree of machine
6720 //               instructions that matches a tree of machine-independent
6721 //               instructions.
6722 // format     -- A string providing the disassembly for this instruction.
6723 //               The value of an instruction's operand may be inserted
6724 //               by referring to it with a '$' prefix.
// opcode     -- Three instruction opcodes may be provided.  These are referred
//               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
6728 //               indicate the type of machine instruction, while secondary
6729 //               and tertiary are often used for prefix options or addressing
6730 //               modes.
6731 // ins_encode -- A list of encode classes with parameters. The encode class
6732 //               name must have been defined in an 'enc_class' specification
6733 //               in the encode section of the architecture description.
6734 
6735 // ============================================================================
6736 // Memory (Load/Store) Instructions
6737 
6738 // Load Instructions
6739 
6740 // Load Byte (8 bit signed)
// Plain (non-acquiring) loads: the needs_acquiring_load predicates keep
// these rules from matching when acquire semantics are required; the
// volatile (ldar*) forms later in this file handle those cases.
instruct loadB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  // n is the ConvI2L; n->in(1) is the underlying LoadB
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long
instruct loadI2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  // n->in(1)->in(1): the LoadI under the ConvI2L under the AndL
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Long (64 bit signed)
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // NOTE(review): the "# int" annotation below is stale for a 64-bit
  // load; it only affects disassembly output, not code generation.
  format %{ "ldr  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Range
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Float
instruct loadF(vRegF dst, memory mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  // FP loads use the generic memory pipe class, not iload_reg_mem
  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
7004 
7005 
7006 // Load Int Constant
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant

instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  // INSN_COST * 4: an arbitrary 64-bit pointer may require a
  // multi-instruction mov sequence
  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
7061 
7062 // Load Pointer Constant One
7063 
// Load Pointer Constant One.
// immP_1 is the pointer constant 1, not NULL; the previous format
// annotation "# NULL ptr" was a copy-paste from loadConP0.
instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
7075 
7076 // Load Poll Page Constant
7077 
// Load Poll Page Constant
instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}

  ins_encode(aarch64_enc_mov_poll_page(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Byte Map Base Constant
instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant
instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant
instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant
instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Packed Float Constant (immediate encodable in an fmov)
instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    // NOTE(review): the immediate is cast to double — fmovs appears to
    // take its immediate as a double; confirm against the assembler API.
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(pipe_class_default);
%}

// Load Float Constant (general case: load from the constant table)
instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(pipe_class_default);
%}

// Load Packed Double Constant (immediate encodable in an fmov)
instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  // NOTE(review): cost is INSN_COST here but INSN_COST * 4 in
  // loadConF_packed — confirm the asymmetry is intended.
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(pipe_class_default);
%}
7189 
7190 // Load Double Constant
7191 
// Load Double Constant (general case: load from the constant table).
// The format annotation previously said "float=$con" — a copy-paste
// from loadConF; corrected to "double=$con".
instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(pipe_class_default);
%}
7206 
7207 // Store Instructions
7208 
7209 // Store CMS card-mark Immediate
// Store CMS card-mark Immediate
// Matched only when the StoreStore barrier is provably unnecessary
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "dmb ishst"
      "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}

// Store Byte
// needs_releasing_store keeps this rule from matching when release
// semantics are required (the stlr forms handle those)
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
7251 
7252 
// Store Byte Immediate Zero.
// The format annotation previously read "strb rscractch2" (misspelled
// and wrong): aarch64_enc_strb0 is the same encoding used by
// storeimmCM0, which stores zr — the annotation now matches.
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
7265 
7266 // Store Char/Short
7267 instruct storeC(iRegIorL2I src, memory mem)
7268 %{
7269   match(Set mem (StoreC mem src));
7270   predicate(!needs_releasing_store(n));
7271 
7272   ins_cost(INSN_COST);
7273   format %{ "strh  $src, $mem\t# short" %}
7274 
7275   ins_encode(aarch64_enc_strh(src, mem));
7276 
7277   ins_pipe(istore_reg_mem);
7278 %}
7279 
7280 instruct storeimmC0(immI0 zero, memory mem)
7281 %{
7282   match(Set mem (StoreC mem zero));
7283   predicate(!needs_releasing_store(n));
7284 
7285   ins_cost(INSN_COST);
7286   format %{ "strh  zr, $mem\t# short" %}
7287 
7288   ins_encode(aarch64_enc_strh0(mem));
7289 
7290   ins_pipe(istore_mem);
7291 %}
7292 
7293 // Store Integer
7294 
7295 instruct storeI(iRegIorL2I src, memory mem)
7296 %{
7297   match(Set mem(StoreI mem src));
7298   predicate(!needs_releasing_store(n));
7299 
7300   ins_cost(INSN_COST);
7301   format %{ "strw  $src, $mem\t# int" %}
7302 
7303   ins_encode(aarch64_enc_strw(src, mem));
7304 
7305   ins_pipe(istore_reg_mem);
7306 %}
7307 
7308 instruct storeimmI0(immI0 zero, memory mem)
7309 %{
7310   match(Set mem(StoreI mem zero));
7311   predicate(!needs_releasing_store(n));
7312 
7313   ins_cost(INSN_COST);
7314   format %{ "strw  zr, $mem\t# int" %}
7315 
7316   ins_encode(aarch64_enc_strw0(mem));
7317 
7318   ins_pipe(istore_mem);
7319 %}
7320 
7321 // Store Long (64 bit signed)
7322 instruct storeL(iRegL src, memory mem)
7323 %{
7324   match(Set mem (StoreL mem src));
7325   predicate(!needs_releasing_store(n));
7326 
7327   ins_cost(INSN_COST);
7328   format %{ "str  $src, $mem\t# int" %}
7329 
7330   ins_encode(aarch64_enc_str(src, mem));
7331 
7332   ins_pipe(istore_reg_mem);
7333 %}
7334 
7335 // Store Long (64 bit signed)
7336 instruct storeimmL0(immL0 zero, memory mem)
7337 %{
7338   match(Set mem (StoreL mem zero));
7339   predicate(!needs_releasing_store(n));
7340 
7341   ins_cost(INSN_COST);
7342   format %{ "str  zr, $mem\t# int" %}
7343 
7344   ins_encode(aarch64_enc_str0(mem));
7345 
7346   ins_pipe(istore_mem);
7347 %}
7348 
7349 // Store Pointer
7350 instruct storeP(iRegP src, memory mem)
7351 %{
7352   match(Set mem (StoreP mem src));
7353   predicate(!needs_releasing_store(n));
7354 
7355   ins_cost(INSN_COST);
7356   format %{ "str  $src, $mem\t# ptr" %}
7357 
7358   ins_encode(aarch64_enc_str(src, mem));
7359 
7360   ins_pipe(istore_reg_mem);
7361 %}
7362 
7363 // Store Pointer
7364 instruct storeimmP0(immP0 zero, memory mem)
7365 %{
7366   match(Set mem (StoreP mem zero));
7367   predicate(!needs_releasing_store(n));
7368 
7369   ins_cost(INSN_COST);
7370   format %{ "str zr, $mem\t# ptr" %}
7371 
7372   ins_encode(aarch64_enc_str0(mem));
7373 
7374   ins_pipe(istore_mem);
7375 %}
7376 
7377 // Store Compressed Pointer
7378 instruct storeN(iRegN src, memory mem)
7379 %{
7380   match(Set mem (StoreN mem src));
7381   predicate(!needs_releasing_store(n));
7382 
7383   ins_cost(INSN_COST);
7384   format %{ "strw  $src, $mem\t# compressed ptr" %}
7385 
7386   ins_encode(aarch64_enc_strw(src, mem));
7387 
7388   ins_pipe(istore_reg_mem);
7389 %}
7390 
7391 instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
7392 %{
7393   match(Set mem (StoreN mem zero));
7394   predicate(Universe::narrow_oop_base() == NULL &&
7395             Universe::narrow_klass_base() == NULL &&
7396             (!needs_releasing_store(n)));
7397 
7398   ins_cost(INSN_COST);
7399   format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}
7400 
7401   ins_encode(aarch64_enc_strw(heapbase, mem));
7402 
7403   ins_pipe(istore_reg_mem);
7404 %}
7405 
7406 // Store Float
7407 instruct storeF(vRegF src, memory mem)
7408 %{
7409   match(Set mem (StoreF mem src));
7410   predicate(!needs_releasing_store(n));
7411 
7412   ins_cost(INSN_COST);
7413   format %{ "strs  $src, $mem\t# float" %}
7414 
7415   ins_encode( aarch64_enc_strs(src, mem) );
7416 
7417   ins_pipe(pipe_class_memory);
7418 %}
7419 
7420 // TODO
7421 // implement storeImmF0 and storeFImmPacked
7422 
7423 // Store Double
7424 instruct storeD(vRegD src, memory mem)
7425 %{
7426   match(Set mem (StoreD mem src));
7427   predicate(!needs_releasing_store(n));
7428 
7429   ins_cost(INSN_COST);
7430   format %{ "strd  $src, $mem\t# double" %}
7431 
7432   ins_encode( aarch64_enc_strd(src, mem) );
7433 
7434   ins_pipe(pipe_class_memory);
7435 %}
7436 
7437 // Store Compressed Klass Pointer
7438 instruct storeNKlass(iRegN src, memory mem)
7439 %{
7440   predicate(!needs_releasing_store(n));
7441   match(Set mem (StoreNKlass mem src));
7442 
7443   ins_cost(INSN_COST);
7444   format %{ "strw  $src, $mem\t# compressed klass ptr" %}
7445 
7446   ins_encode(aarch64_enc_strw(src, mem));
7447 
7448   ins_pipe(istore_reg_mem);
7449 %}
7450 
7451 // TODO
7452 // implement storeImmD0 and storeDImmPacked
7453 
7454 // prefetch instructions
7455 // Must be safe to execute with invalid address (cannot fault).
7456 
7457 instruct prefetchalloc( memory mem ) %{
7458   match(PrefetchAllocation mem);
7459 
7460   ins_cost(INSN_COST);
7461   format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}
7462 
7463   ins_encode( aarch64_enc_prefetchw(mem) );
7464 
7465   ins_pipe(iload_prefetch);
7466 %}
7467 
7468 //  ---------------- volatile loads and stores ----------------
7469 
7470 // Load Byte (8 bit signed)
7471 instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7472 %{
7473   match(Set dst (LoadB mem));
7474 
7475   ins_cost(VOLATILE_REF_COST);
7476   format %{ "ldarsb  $dst, $mem\t# byte" %}
7477 
7478   ins_encode(aarch64_enc_ldarsb(dst, mem));
7479 
7480   ins_pipe(pipe_serial);
7481 %}
7482 
7483 // Load Byte (8 bit signed) into long
7484 instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7485 %{
7486   match(Set dst (ConvI2L (LoadB mem)));
7487 
7488   ins_cost(VOLATILE_REF_COST);
7489   format %{ "ldarsb  $dst, $mem\t# byte" %}
7490 
7491   ins_encode(aarch64_enc_ldarsb(dst, mem));
7492 
7493   ins_pipe(pipe_serial);
7494 %}
7495 
7496 // Load Byte (8 bit unsigned)
7497 instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7498 %{
7499   match(Set dst (LoadUB mem));
7500 
7501   ins_cost(VOLATILE_REF_COST);
7502   format %{ "ldarb  $dst, $mem\t# byte" %}
7503 
7504   ins_encode(aarch64_enc_ldarb(dst, mem));
7505 
7506   ins_pipe(pipe_serial);
7507 %}
7508 
7509 // Load Byte (8 bit unsigned) into long
// Volatile load of an unsigned byte widened to long; LDARB
// zero-extends, so LoadUB + ConvI2L fuse into one acquiring load.
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}
7521 
7522 // Load Short (16 bit signed)
// Volatile load of a signed short using LDARSH to a W register
// (acquiring, sign-extended to 32 bits).
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}
7534 
// Volatile load of an unsigned short/char using LDARH (acquiring,
// zero-extended).
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}
7546 
7547 // Load Short/Char (16 bit unsigned) into long
// Volatile load of an unsigned short/char widened to long; LDARH
// zero-extends, so LoadUS + ConvI2L fuse into one acquiring load.
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
7559 
7560 // Load Short/Char (16 bit signed) into long
// Volatile load of a signed short widened to long; emitted via the
// sign-extending load-acquire encoding aarch64_enc_ldarsh.
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Fixed: format previously claimed "ldarh" (zero-extending) while
  // the encoding actually emits ldarsh (sign-extending).
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
7572 
7573 // Load Integer (32 bit signed)
// Volatile 32-bit int load using LDAR on a W register.
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
7585 
7586 // Load Integer (32 bit unsigned) into long
// Volatile unsigned-int-to-long load: (long)x & 0xFFFFFFFF collapses
// to a single LDARW, whose W-register write zero-extends to 64 bits,
// making the explicit mask redundant in the generated code.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
7598 
7599 // Load Long (64 bit signed)
// Volatile 64-bit long load using LDAR on an X register.
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // Fixed: disassembly comment previously said "# int" for a 64-bit load.
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
7611 
7612 // Load Pointer
// Volatile pointer load (64-bit LDAR).
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
7624 
7625 // Load Compressed Pointer
// Volatile compressed-pointer (narrow oop) load; 32-bit LDARW.
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
7637 
7638 // Load Float
// Volatile float load into an FP register via the fldars encoding
// (acquiring load; plain LDAR has no FP destination form, so the
// encoding class handles the transfer).
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}
7650 
7651 // Load Double
// Volatile double load into an FP register via the fldard encoding.
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
7663 
7664 // Store Byte
// Volatile byte store using STLRB (store-release); the release
// semantics provide volatile-write ordering without a separate barrier.
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}
7676 
7677 // Store Char/Short
// Volatile char/short store using STLRH (store-release, 16-bit).
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}
7689 
7690 // Store Integer
7691 
// Volatile int store using STLRW (store-release, 32-bit).
instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  // Fixed: spacing of the match tree ("mem(StoreI") made consistent
  // with every sibling volatile store rule.
  match(Set mem (StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
7703 
7704 // Store Long (64 bit signed)
// Volatile 64-bit long store using STLR.
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  // Fixed: disassembly comment previously said "# int" for a 64-bit store.
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
7716 
7717 // Store Pointer
// Volatile pointer store (64-bit STLR, store-release).
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
7729 
7730 // Store Compressed Pointer
// Volatile compressed-pointer (narrow oop) store; 32-bit STLRW.
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
7742 
7743 // Store Float
// Volatile float store from an FP register via the fstlrs encoding
// (releasing store; STLR has no FP source form, so the encoding
// class handles the transfer).
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}
7755 
7756 // TODO
7757 // implement storeImmF0 and storeFImmPacked
7758 
7759 // Store Double
// Volatile double store from an FP register via the fstlrd encoding.
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
7771 
7772 //  ---------------- end of volatile loads and stores ----------------
7773 
7774 // ============================================================================
7775 // BSWAP Instructions
7776 
// Byte-swap a 32-bit int with a single REV (32-bit form).
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
7789 
// Byte-swap a 64-bit long with a single REV.
instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
7802 
// Byte-swap an unsigned 16-bit value: REV16 swaps bytes within each
// halfword; no extension needed for the unsigned (char) case.
instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
7815 
// Byte-swap a signed 16-bit value: REV16 swaps the bytes, then
// SBFM over bits 0..15 sign-extends the result to 32 bits.
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
7830 
7831 // ============================================================================
7832 // Zero Count Instructions
7833 
// Integer.numberOfLeadingZeros: single CLZ (32-bit form).
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
7845 
// Long.numberOfLeadingZeros: single CLZ (64-bit form); result is int.
instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
7857 
// Integer.numberOfTrailingZeros: AArch64 has no CTZ, so bit-reverse
// with RBIT then count leading zeros.
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
7871 
// Long.numberOfTrailingZeros: RBIT + CLZ (64-bit forms); result is int.
instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
7885 
7886 //---------- Population Count Instructions -------------------------------------
7887 //
7888 
// Integer.bitCount via NEON: move to an FP/SIMD register, CNT counts
// bits per byte lane, ADDV sums the lanes, result moved back.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): this writes back into $src (an input operand)
    // without a USE_KILL effect — presumably safe because it only
    // zero-extends the value already there; confirm against the
    // register allocator's assumptions.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
7910 
// Integer.bitCount of an in-memory int: load 32 bits straight into
// the FP/SIMD temp (avoiding a GPR round-trip), then CNT + ADDV.
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    // loadStore resolves the full memory operand (base/index/scale/disp).
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
7932 
7933 // Note: Long.bitCount(long) returns an int.
// Note: Long.bitCount(long) returns an int.
// Same NEON CNT + ADDV sequence as popCountI, on the full 64 bits.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
7953 
// Long.bitCount of an in-memory long: load 64 bits straight into the
// FP/SIMD temp, then CNT + ADDV; result is an int.
instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    // loadStore resolves the full memory operand (base/index/scale/disp).
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
7975 
7976 // ============================================================================
7977 // MemBar Instruction
7978 
// LoadFence: orders the preceding load against all later memory
// accesses (LoadLoad|LoadStore), i.e. acquire semantics.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}
7990 
// MemBarAcquire elided to nothing when unnecessary_acquire(n)
// determines the preceding ldar already provides the ordering;
// only a block comment is emitted.
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}
8004 
// MemBarAcquire fallback: explicit LoadLoad|LoadStore barrier when
// the acquire cannot be elided.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}
8018 
8019 
// MemBarAcquireLock (after monitor enter): same acquire barrier
// as membar_acquire.
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}
8032 
// StoreFence: orders all earlier memory accesses against the
// following store (LoadStore|StoreStore), i.e. release semantics.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
8044 
// MemBarRelease elided when unnecessary_release(n) determines a
// following stlr already provides the ordering.
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}
8057 
// MemBarRelease fallback: explicit LoadStore|StoreStore barrier.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release" %}

  ins_encode %{
    __ block_comment("membar_release");
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
8070 
// MemBarStoreStore: orders earlier stores before later stores only.
instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
8082 
// MemBarReleaseLock (before monitor exit): same release barrier
// as membar_release.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}

  ins_pipe(pipe_serial);
%}
8095 
// MemBarVolatile elided when unnecessary_volatile(n) determines the
// surrounding ldar/stlr pair already gives full-fence behavior.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}
8109 
// MemBarVolatile fallback: StoreLoad barrier, the one ordering that
// acquire/release instructions alone cannot provide. High cost
// (VOLATILE_REF_COST*100) steers the matcher to the elided form
// whenever its predicate allows.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile" %}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
8123 
8124 // ============================================================================
8125 // Cast/Convert Instructions
8126 
// CastX2P (long -> pointer): plain register move, skipped entirely
// when the allocator assigned the same register.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
8141 
// CastP2X (pointer -> long): plain register move, skipped when
// source and destination coincide.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
8156 
8157 // Convert oop into int for vectors alignment masking
// (int)(long)ptr collapses to a 32-bit register move (movw both
// truncates and zero-extends the upper half).
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8169 
8170 // Convert compressed oop into int for vectors alignment masking
8171 // in case of 32bit oops (heap < 4Gb).
// Convert compressed oop into int for vectors alignment masking in
// case of 32-bit oops (heap < 4Gb): with narrow_oop_shift() == 0 the
// narrow oop's low 32 bits equal the raw address bits, so a 32-bit
// register move suffices.
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Fixed: format string was "mov dst, $src" — the missing '$' made
  // the disassembly print the literal word "dst" instead of the
  // destination register.
  format %{ "mov $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8185 
8186 
8187 // Convert oop pointer into compressed form
// Compress an oop that may be null; encode_heap_oop handles the null
// check, hence the flags kill and higher cost than the not-null form.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}
8201 
// Compress an oop statically known non-null: no null check needed.
// NOTE(review): 'cr' is declared but has no effect() — presumably
// encode_heap_oop_not_null leaves flags untouched; confirm.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
8212 
// Decompress a narrow oop that may be null; the not-null/constant
// cases are excluded by the predicate and matched by the rule below.
// NOTE(review): 'cr' is declared but has no effect() — confirm
// decode_heap_oop leaves flags untouched.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}
8226 
// Decompress a narrow oop statically known non-null (or constant):
// skips the null check the general form performs.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
8240 
8241 // n.b. AArch64 implementations of encode_klass_not_null and
8242 // decode_klass_not_null do not modify the flags register so, unlike
8243 // Intel, we don't kill CR as a side effect here
8244 
// Compress a klass pointer (never null). Per the note above, the
// AArch64 implementation does not modify flags, so no KILL cr.
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}
8259 
// Decompress a narrow klass pointer (never null); uses the in-place
// single-register MacroAssembler variant when dst == src.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
8278 
// CheckCastPP is a type-system-only node: zero-size, no code emitted.
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
8288 
// CastPP is a type-system-only node: zero-size, no code emitted.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
8298 
// CastII is a type-system-only node: zero-size, zero-cost, no code.
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
8309 
8310 // ============================================================================
8311 // Atomic operation instructions
8312 //
8313 // Intel and SPARC both implement Ideal Node LoadPLocked and
8314 // Store{PIL}Conditional instructions using a normal load for the
8315 // LoadPLocked and a CAS for the Store{PIL}Conditional.
8316 //
8317 // The ideal code appears only to use LoadPLocked/StorePLocked as a
8318 // pair to lock object allocations from Eden space when not using
8319 // TLABs.
8320 //
8321 // There does not appear to be a Load{IL}Locked Ideal Node and the
8322 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
8323 // and to use StoreIConditional only for 32-bit and StoreLConditional
8324 // only for 64-bit.
8325 //
8326 // We implement LoadPLocked and StorePLocked instructions using,
8327 // respectively the AArch64 hw load-exclusive and store-conditional
8328 // instructions. Whereas we must implement each of
8329 // Store{IL}Conditional using a CAS which employs a pair of
8330 // instructions comprising a load-exclusive followed by a
8331 // store-conditional.
8332 
8333 
8334 // Locked-load (linked load) of the current heap-top
8335 // used when updating the eden heap top
8336 // implemented using ldaxr on AArch64
8337 
// Locked-load (linked load) of the current heap-top, used when
// updating the eden heap top; implemented with LDAXR (load-exclusive
// with acquire), pairing with the STLXR in storePConditional.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
8350 
8351 // Conditional-store of the updated heap-top.
8352 // Used during allocation of the shared heap.
8353 // Sets flag (EQ) on success.
8354 // implemented using stlxr on AArch64.
8355 
// Conditional store of the updated heap-top, paired with loadPLocked.
// STLXR (store-exclusive release) writes into rscratch1: 0 on success;
// the cmpw against zr turns that into an EQ flag for the caller.
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
8375 
8376 // this has to be implemented as a CAS
// StoreLConditional has no linked-load partner (see block comment
// above), so it is implemented as a full 64-bit CAS; flags end up
// EQ on success.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8392 
8393 // this has to be implemented as a CAS
// StoreIConditional: 32-bit variant, likewise implemented as a CAS;
// flags end up EQ on success.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8409 
8410 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
8411 // can't match them
8412 
// CompareAndSwapI: 32-bit CAS, then cset materializes the boolean
// success result (1 if the exchange happened) into $res.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8429 
// CompareAndSwapL: 64-bit CAS; boolean success result in $res.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8446 
// CompareAndSwapP: pointer-width (64-bit) CAS; boolean success in $res.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8463 
// CompareAndSwapN: narrow-oop (32-bit) CAS; boolean success in $res.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8480 
8481 
// GetAndSetI: atomic 32-bit exchange; previous value returned in $prev.
instruct get_and_setI(indirect mem, iRegINoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetI mem newv));
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8490 
// GetAndSetL: atomic 64-bit exchange; previous value returned in $prev.
instruct get_and_setL(indirect mem, iRegLNoSp newv, iRegL prev) %{
  match(Set prev (GetAndSetL mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8499 
// GetAndSetN: atomic narrow-oop (32-bit) exchange.
instruct get_and_setN(indirect mem, iRegNNoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetN mem newv));
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8508 
// GetAndSetP: atomic pointer-width (64-bit) exchange.
instruct get_and_setP(indirect mem, iRegPNoSp newv, iRegP prev) %{
  match(Set prev (GetAndSetP mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8517 
8518 
// GetAndAddL (register increment): atomic fetch-and-add, prior value
// returned in $newval.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8528 
// GetAndAddL when the fetched value is unused: pass noreg so the
// prior value is discarded; slightly cheaper, preferred by cost.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8539 
// GetAndAddL with an immediate increment (add/sub-encodable constant).
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8549 
// GetAndAddL, immediate increment, fetched value unused.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8560 
// GetAndAddI (register increment): 32-bit atomic fetch-and-add.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8570 
8571 instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
8572   predicate(n->as_LoadStore()->result_not_used());
8573   match(Set dummy (GetAndAddI mem incr));
8574   ins_cost(INSN_COST * 9);
8575   format %{ "get_and_addI [$mem], $incr" %}
8576   ins_encode %{
8577     __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
8578   %}
8579   ins_pipe(pipe_serial);
8580 %}
8581 
8582 instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
8583   match(Set newval (GetAndAddI mem incr));
8584   ins_cost(INSN_COST * 10);
8585   format %{ "get_and_addI $newval, [$mem], $incr" %}
8586   ins_encode %{
8587     __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
8588   %}
8589   ins_pipe(pipe_serial);
8590 %}
8591 
8592 instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
8593   predicate(n->as_LoadStore()->result_not_used());
8594   match(Set dummy (GetAndAddI mem incr));
8595   ins_cost(INSN_COST * 9);
8596   format %{ "get_and_addI [$mem], $incr" %}
8597   ins_encode %{
8598     __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
8599   %}
8600   ins_pipe(pipe_serial);
8601 %}
8602 
// Manifest a CmpL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    // cmp sets the flags; csetw writes 0 on equality, 1 otherwise;
    // cnegw then negates that 0/1 when src1 < src2, yielding -1/0/1.
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
8625 
// As cmpL3_reg_reg but comparing against an add/sub-encodable long
// immediate: manifest -1/0/1 in dst for (src1 cmp src2).
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    // immLAddSub guarantees the constant fits an add/sub immediate, so
    // -con below cannot overflow.  A negative immediate is folded into
    // the complementary adds; the flag result is the same as subs.
    int32_t con = (int32_t)$src2$$constant;
    if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
8650 
8651 // ============================================================================
8652 // Conditional Move Instructions
8653 
// n.b. we have identical rules for both a signed compare op (cmpOp)
// and an unsigned compare op (cmpOpU). it would be nice if we could
// define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
8663 
// Conditional move, int, signed compare.  n.b. src2 is deliberately
// emitted as the first csel source and src1 as the second; this
// operand order is consistent across all the CMove rules below.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Conditional move, int, unsigned compare.
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8695 
8696 // special cases where one arg is zero
8697 
8698 // n.b. this is selected in preference to the rule above because it
8699 // avoids loading constant 0 into a source register
8700 
8701 // TODO
8702 // we ought only to be able to cull one of these variants as the ideal
8703 // transforms ought always to order the zero consistently (to left/right?)
8704 
// Zero on the left of the Binary pair; signed compare.  Uses zr
// instead of materialising the constant 0.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero on the left; unsigned compare.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero on the right; signed compare.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero on the right; unsigned compare.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8768 
8769 // special case for creating a boolean 0 or 1
8770 
8771 // n.b. this is selected in preference to the rule above because it
8772 // avoids loading constants 0 and 1 into a source register
8773 
// Selecting between constants 1 and 0: csincw zr, zr gives 0 when the
// condition holds and 0+1 otherwise, so no constant loads are needed.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// Unsigned-compare flavour of the 0/1 special case above.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
8811 
// Conditional move, long, signed compare (64-bit csel).
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Conditional move, long, unsigned compare.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8843 
8844 // special cases where one arg is zero
8845 
// Long conditional move, zero on the right; signed compare.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero on the right; unsigned compare.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero on the left; signed compare.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero on the left; unsigned compare.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8909 
// Conditional move, pointer, signed compare (64-bit csel).
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Conditional move, pointer, unsigned compare.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8941 
8942 // special cases where one arg is zero
8943 
// Pointer conditional move, null (zero) on the right; signed compare.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Null on the right; unsigned compare.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Null on the left; signed compare.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Null on the left; unsigned compare.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9007 
// Conditional move, compressed pointer (narrow oop), signed compare;
// 32-bit cselw since narrow oops are 32 bits wide.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9023 
// Conditional move, compressed pointer, unsigned compare.
// Fixed: the format string previously said "# signed" although this is
// the cmpOpU (unsigned) flavour of the rule above.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9039 
9040 // special cases where one arg is zero
9041 
// Compressed-pointer conditional move, zero on the right; signed.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero on the right; unsigned compare.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero on the left; signed compare.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero on the left; unsigned compare.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9105 
// Conditional move, float, signed compare; same src2-first operand
// ordering as the integer CMove rules above.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}

// Conditional move, float, unsigned compare.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
9141 
// Conditional move, double, signed compare.
// Fixed: the format string previously said "cmove float" although this
// rule handles CMoveD (fcseld on double registers).
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
9159 
// Conditional move, double, unsigned compare.
// Fixed: the format string previously said "cmove float" although this
// rule handles CMoveD (fcseld on double registers).
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
9177 
9178 // ============================================================================
9179 // Arithmetic Instructions
9180 //
9181 
9182 // Integer Addition
9183 
9184 // TODO
9185 // these currently employ operations which do not set CR and hence are
9186 // not flagged as killing CR but we would like to isolate the cases
9187 // where we want to set flags from those where we don't. need to work
9188 // out how to do that.
9189 
// 32-bit register-register add (non-flag-setting form; see the TODO
// note above about isolating the flag-setting cases).
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(ialu_reg_reg);
%}
9204 
// 32-bit add of an add/sub-encodable immediate.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// As above but with the int operand produced by truncating a long
// (ConvL2I); the same 32-bit addw does the truncation for free.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
9232 
9233 // Pointer Addition
// Pointer plus 64-bit offset register.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9248 
// Pointer plus an int offset: fold the ConvI2L into the add's sxtw
// extend so no separate sign-extension instruction is needed.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer plus a left-shifted long index, emitted as a single lea with
// an lsl-scaled index operand.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Pointer plus a sign-extended, shifted int index: ConvI2L and shift
// both folded into the lea's sxtw-scaled addressing mode.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9293 
// ((long)(int)src) << scale folded into one sbfiz: sign-extend a field
// of at most 32 bits from src and position it at bit (scale & 63);
// the field width is 64 - scale, capped at the int width of 32.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
9308 
// Pointer Immediate Addition
// n.b. this needs to be more expensive than using an indirect memory
// operand
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9325 
9326 // Long Addition
// 64-bit register-register add.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(ialu_reg_reg);
%}
9342 
// Long Immediate Addition.  No constant pool entries required.
// 64-bit add of an add/sub-encodable long immediate.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9357 
9358 // Integer Subtraction
// 32-bit register-register subtract (non-flag-setting form).
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(ialu_reg_reg);
%}
9373 
9374 // Immediate Subtraction
// 32-bit subtract of an add/sub-encodable immediate.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
9388 
9389 // Long Subtraction
// 64-bit register-register subtract.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(ialu_reg_reg);
%}
9405 
// Long Immediate Subtraction.  No constant pool entries required.
// 64-bit subtract of an add/sub-encodable long immediate.
// Fixed: the format string read "sub$dst" (missing separator); it now
// matches the spacing of the other add/sub rules.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9420 
9421 // Integer Negation (special case for sub)
9422 
// negw dst, src  ==  subw dst, zr, src; matched from (SubI 0 src).
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9436 
9437 // Long Negation
9438 
// neg dst, src  ==  sub dst, zr, src; matched from (SubL 0 src).
// Fixed: src was typed iRegIorL2I (the int / L2I operand class used by
// the int rule above), but the input of a SubL is a long value; use
// iRegL, consistent with subL_reg_reg.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9452 
9453 // Integer Multiply
9454 
// 32-bit multiply.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(imul_reg_reg);
%}
9469 
// Long product of two sign-extended ints: (long)src1 * (long)src2
// collapses to a single 32x32->64 smull.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
9484 
9485 // Long Multiply
9486 
// 64-bit multiply.
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(lmul_reg_reg);
%}
9501 
// High 64 bits of the 128-bit signed product of src1 and src2 (smulh).
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2, \t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
9517 
9518 // Combined Integer Multiply & Add/Sub
9519 
// dst = src3 + src1 * src2, 32-bit multiply-add.
// Fixed: the format previously printed "madd" although the encoding
// emits the 32-bit maddw.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
9535 
// dst = src3 - src1 * src2, 32-bit multiply-subtract.
// Fixed: the format previously printed "msub" although the encoding
// emits the 32-bit msubw.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
9551 
9552 // Combined Long Multiply & Add/Sub
9553 
// dst = src3 + src1 * src2, 64-bit multiply-add.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
9569 
9570 instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
9571   match(Set dst (SubL src3 (MulL src1 src2)));
9572 
9573   ins_cost(INSN_COST * 5);
9574   format %{ "msub  $dst, $src1, $src2, $src3" %}
9575 
9576   ins_encode %{
9577     __ msub(as_Register($dst$$reg),
9578             as_Register($src1$$reg),
9579             as_Register($src2$$reg),
9580             as_Register($src3$$reg));
9581   %}
9582 
9583   ins_pipe(lmac_reg_reg);
9584 %}
9585 
// Integer Divide

// 32-bit signed divide: dst = src1 / src2 (encoding emits sdivw plus the
// corner-case handling in aarch64_enc_divw).
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}

// Sign-bit extraction: (src1 >> 31) >>> 31 yields just the sign bit, so the
// two shifts collapse into a single logical shift right by 31.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// Adds the sign bit to src (rounding adjustment used for signed division
// by two), fused into one addw with an LSR #31 shifted operand.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}

// Long Divide

// 64-bit signed divide: dst = src1 / src2 (via aarch64_enc_div).
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}

// 64-bit sign-bit extraction: (src1 >> 63) >>> 63 == src1 >>> 63.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}

// 64-bit variant of div2Round: src + sign-bit, via add with LSR #63.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immL_63 div1, immL_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "add $dst, $src, $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
9657 
9658 // Integer Remainder
9659 
// 32-bit signed remainder: dst = src1 % src2.  AArch64 has no remainder
// instruction, so the encoding emits sdivw into rscratch1 followed by
// msubw (dst = src1 - rscratch1 * src2).
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  // The previous format text "msubw($dst, ..." had an unbalanced '(' and no
  // closing paren, producing garbled disassembly listings; print plain
  // operands like every other two-instruction format in this file.
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
9670 
9671 // Long Remainder
9672 
// 64-bit signed remainder: dst = src1 % src2, emitted as sdiv into
// rscratch1 followed by msub (dst = src1 - rscratch1 * src2).
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // The previous format text "msub($dst, ..." had an unbalanced '(' and
  // used "\n" instead of the "\n\t" continuation every other multi-line
  // format in this file uses; both garbled disassembly listings.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
9683 
// Integer Shifts
//
// Java only uses the low 5 bits of a 32-bit shift count, so the immediate
// forms mask the constant with 0x1f explicitly.
// NOTE(review): the register forms emit lslvw/lsrvw/asrvw with no masking;
// this assumes the hardware takes the count modulo 32 for W registers --
// confirm against the ARM ARM.

// Shift Left Register
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9781 
9782 // Combined Int Mask and Right Shift (using UBFM)
9783 // TODO
9784 
// Long Shifts
//
// Same structure as the integer shifts above, for 64-bit operands: the
// immediate forms mask the count with 0x3f (Java uses only the low 6 bits
// of a long shift count); the register forms use lslv/lsrv/asrv.

// Shift Left Register
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// A special-case pattern for card table stores.
// Same lsr emission as urShiftL_reg_imm, but matching through a CastP2X so
// a pointer-to-integer conversion followed by a shift needs no extra move.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9898 
// BEGIN This section of the file is automatically generated. Do not edit --------------
// NOTE(review): explanatory comments below were added by hand; if the
// generator is re-run they will be lost -- consider adding them to the
// generator script instead.

// Bitwise NOT: src1 ^ -1 == ~src1, emitted as eon with the zero register.
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
// 32-bit bitwise NOT: src1 ^ -1 == ~src1, via eonw with zr.
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
9933 
// Fused logical-op-with-complement rules (auto-generated): each matches a
// bitwise op whose second operand is XORed with -1 (i.e. complemented) and
// emits the single AArch64 complementing instruction (bic/orn/eon).

// dst = src1 & ~src2  ->  bicw
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 & ~src2  ->  bic (64-bit)
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | ~src2  ->  ornw
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | ~src2  ->  orn (64-bit)
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = -1 ^ (src2 ^ src1) == ~(src1 ^ src2)  ->  eonw
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = -1 ^ (src2 ^ src1) == ~(src1 ^ src2)  ->  eon (64-bit)
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10035 
// AND with complemented, shifted operand (auto-generated): each rule fuses
// dst = src1 & ~(src2 <shift> src3) into one bic/bicw with a shifted
// register operand.  Shift constants are masked to the operand width
// (0x1f for 32-bit, 0x3f for 64-bit), matching Java shift semantics.

// dst = src1 & ~(src2 >>> src3)  ->  bicw ... LSR
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 >>> src3)  ->  bic ... LSR (64-bit)
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 >> src3)  ->  bicw ... ASR
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 >> src3)  ->  bic ... ASR (64-bit)
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 << src3)  ->  bicw ... LSL
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 << src3)  ->  bic ... LSL (64-bit)
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10143 
// XOR-NOT with shifted operand (auto-generated): each rule matches
// -1 ^ ((src2 <shift> src3) ^ src1) == ~(src1 ^ (src2 <shift> src3)) and
// emits a single eon/eonw with a shifted register operand.

// dst = ~(src1 ^ (src2 >>> src3))  ->  eonw ... LSR
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 >>> src3))  ->  eon ... LSR (64-bit)
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 >> src3))  ->  eonw ... ASR
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 >> src3))  ->  eon ... ASR (64-bit)
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 << src3))  ->  eonw ... LSL
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 << src3))  ->  eon ... LSL (64-bit)
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10251 
// OR with complemented, shifted operand (auto-generated): each rule fuses
// dst = src1 | ~(src2 <shift> src3) into one orn/ornw with a shifted
// register operand.

// dst = src1 | ~(src2 >>> src3)  ->  ornw ... LSR
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >>> src3)  ->  orn ... LSR (64-bit)
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >> src3)  ->  ornw ... ASR
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >> src3)  ->  orn ... ASR (64-bit)
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 << src3)  ->  ornw ... LSL
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 << src3)  ->  orn ... LSL (64-bit)
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10359 
// AND with shifted operand (auto-generated): each rule fuses
// dst = src1 & (src2 <shift> src3) into one andw/andr with a shifted
// register operand.

// dst = src1 & (src2 >>> src3)  ->  andw ... LSR
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >>> src3)  ->  andr ... LSR (64-bit)
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >> src3)  ->  andw ... ASR
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >> src3)  ->  andr ... ASR (64-bit)
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 << src3)  ->  andw ... LSL
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 << src3)  ->  andr ... LSL (64-bit)
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10473 
// XOR with shifted operand (auto-generated): each rule fuses
// dst = src1 ^ (src2 <shift> src3) into one eorw/eor with a shifted
// register operand.

// dst = src1 ^ (src2 >>> src3)  ->  eorw ... LSR
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >>> src3)  ->  eor ... LSR (64-bit)
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >> src3)  ->  eorw ... ASR
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >> src3)  ->  eor ... ASR (64-bit)
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 << src3)  ->  eorw ... LSL
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 << src3)  ->  eor ... LSL (64-bit)
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10587 
// ---- OR with a shifted-register operand ---------------------------------
// Same fusion as the Xor rules above, emitting shifted-register ORR/ORRW.
// Shift immediates are masked to the operand width; flags are not set.

// Or (int) with unsigned-right-shifted int register
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Or (long) with unsigned-right-shifted long register
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Or (int) with arithmetic-right-shifted int register
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Or (long) with arithmetic-right-shifted long register
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Or (int) with left-shifted int register
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Or (long) with left-shifted long register
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10701 
// ---- ADD with a shifted-register operand --------------------------------
// Fuses a shift-by-constant feeding an Add into one shifted-register
// ADD/ADDW.  Shift immediates are masked to the operand width; the
// non-flag-setting forms are used, so cr is declared but untouched.

// Add (int) with unsigned-right-shifted int register
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Add (long) with unsigned-right-shifted long register
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Add (int) with arithmetic-right-shifted int register
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Add (long) with arithmetic-right-shifted long register
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Add (int) with left-shifted int register
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Add (long) with left-shifted long register
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10815 
// ---- SUB with a shifted-register operand --------------------------------
// Fuses a shift-by-constant feeding a Sub into one shifted-register
// SUB/SUBW.  Note the shifted operand is always the subtrahend (src2),
// matching the SubX (URShift/RShift/LShift ...) tree shapes below.

// Sub (int) with unsigned-right-shifted int register
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Sub (long) with unsigned-right-shifted long register
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Sub (int) with arithmetic-right-shifted int register
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Sub (long) with arithmetic-right-shifted long register
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Sub (int) with left-shifted int register
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Sub (long) with left-shifted long register
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10929 
10930 
10931 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// A left-shift by L then signed right-shift by R on a W-bit value is a
// signed bitfield move: SBFM with immr = (R - L) & (W - 1) and
// imms = (W - 1) - L, per the AArch64 SBFM encoding.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of sbfmL; shift counts are guarded to [0, 31].
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Unsigned counterpart of sbfmL: left shift then logical right shift
// becomes UBFM with the same (immr, imms) computation.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of ubfmL; shift counts are guarded to [0, 31].
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
// Bitfield extract with shift & mask
// (x >>> rshift) & mask, where mask = 2^k - 1 (guaranteed by the
// immI_bitmask/immL_bitmask operand types), is a UBFX of k bits starting
// at bit rshift.
// NOTE(review): the format strings below omit $rshift, so disassembly
// comments show only dst/src/mask; cosmetic only.

instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    // NOTE(review): C 'long' is 32-bit on LLP64 targets; immI_bitmask
    // values fit in 32 bits here, so this is safe for the w-form.
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);  // mask is 2^width - 1
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit bitfield extract: (x >>> rshift) & (2^width - 1)
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    // NOTE(review): 'long' is 32-bit on LLP64 — a 64-bit mask constant
    // would truncate here; confirm immL_bitmask range or use jlong.
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    // The 64-bit ubfx zero-extends into the upper word, which is
    // exactly the ConvI2L semantics for a non-negative masked value.
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11073 
// Rotations
// (x << lshift) | (y >>> rshift) with lshift + rshift == word size is
// the EXTR instruction (extract from a register pair).  The lshift
// operand is only constrained by the predicate; the encoding needs just
// rshift.  The same pattern with AddL/AddI is equivalent because the
// two shifted fields cannot overlap when the shifts sum to the width.

instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit variant: lshift + rshift must be a multiple of 32.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// As extrOrL but the combining operator is AddL (equivalent here).
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// As extrOrI but the combining operator is AddI (equivalent here).
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11135 
11136 
// rol expander
// AArch64 has no rotate-left instruction; rol(x, s) is implemented as
// rorv(x, -s), since rotating left by s equals rotating right by
// (width - s) and RORV uses the shift modulo the register width.
// Clobbers rscratch1 (holds the negated shift count).

instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    // rscratch1 = 0 - shift; rorv masks it to [0, 63].
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// rol expander
// 32-bit variant: rorvw masks the negated shift to [0, 31].

instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
11168 
// Long rotate-left by variable amount, matching the
// (x << s) | (x >>> (64 - s)) idiom.
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom written with (0 - s): Java masks long shifts to 6 bits, so
// (x >>> -s) == (x >>> (64 - s)) and the rotate is still correct.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}
11186 
// Integer rotate-left by variable amount, matching the
// (x << s) | (x >>> (32 - s)) idiom.
// Fix: this rule matches a 32-bit OrI tree, so its operands must use
// the 32-bit register classes and it must expand to the 32-bit
// rolI_rReg expander.  The original declared iRegLNoSp/iRegL and
// expanded to rolL_rReg, so the int rotate rule could never be applied
// correctly (cf. JDK-8154537).
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
11195 
// Integer rotate-left, (0 - s) form: Java masks int shifts to 5 bits,
// so (x >>> -s) == (x >>> (32 - s)).
// Fix: use the 32-bit register classes and the 32-bit rolI_rReg
// expander; the original used iRegLNoSp/iRegL and rolL_rReg for this
// 32-bit OrI pattern (cf. JDK-8154537).
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
11204 
// ror expander
// Rotate-right maps directly onto the RORV instruction; the shift
// register is used as-is (RORV takes it modulo the register width).

instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander
// 32-bit variant using RORVW.

instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
11234 
// Long rotate-right by variable amount, matching the
// (x >>> s) | (x << (64 - s)) idiom.
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom written with (0 - s); equivalent because long shifts are
// masked to 6 bits.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}
11252 
// Integer rotate-right by variable amount, matching the
// (x >>> s) | (x << (32 - s)) idiom.
// Fix: this rule matches a 32-bit OrI tree, so its operands must use
// the 32-bit register classes and it must expand to the 32-bit
// rorI_rReg expander.  The original declared iRegLNoSp/iRegL and
// expanded to rorL_rReg (cf. JDK-8154537).
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
11261 
// Integer rotate-right, (0 - s) form: int shifts are masked to 5 bits,
// so (x << -s) == (x << (32 - s)).
// Fix: use the 32-bit register classes and the 32-bit rorI_rReg
// expander; the original used iRegLNoSp/iRegL and rorL_rReg for this
// 32-bit OrI pattern (cf. JDK-8154537).
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
11270 
// Add/subtract (extended)
// ConvI2L feeding an add/sub maps onto the extended-register form of
// ADD/SUB with a sign-extend-word (sxtw) operand modifier.
// NOTE(review): the trailing ';' after the closing '%}' of these two
// rules is redundant but harmless to the ADL parser.

instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

// Subtract with sign-extended (int -> long) subtrahend.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
11298 
11299 
// ---- Add with extend expressed as a shift pair --------------------------
// (x << k) >> k (signed) or (x << k) >>> k (unsigned) is a sign- or
// zero-extension of the low (width - k) bits; when it feeds an add, the
// whole tree maps onto the extended-register ADD form.  The immI_NN
// operand types pin both shift counts, so e.g. k = 16 on a 32-bit value
// is sxth, k = 24 is sxtb/uxtb, and so on.

// addw dst, src1, sxth(src2): (src2 << 16) >> 16 sign-extends a short.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// addw dst, src1, sxtb(src2): (src2 << 24) >> 24 sign-extends a byte.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// addw dst, src1, uxtb(src2): (src2 << 24) >>> 24 zero-extends a byte.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// add dst, src1, sxth(src2): 64-bit, (src2 << 48) >> 48.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// add dst, src1, sxtw(src2): 64-bit, (src2 << 32) >> 32.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// add dst, src1, sxtb(src2): 64-bit, (src2 << 56) >> 56.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// add dst, src1, uxtb(src2): 64-bit, (src2 << 56) >>> 56.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
11390 
11391 
11392 instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
11393 %{
11394   match(Set dst (AddI src1 (AndI src2 mask)));
11395   ins_cost(INSN_COST);
11396   format %{ "addw  $dst, $src1, $src2, uxtb" %}
11397 
11398    ins_encode %{
11399      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
11400             as_Register($src2$$reg), ext::uxtb);
11401    %}
11402   ins_pipe(ialu_reg_reg);
11403 %}
11404 
11405 instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
11406 %{
11407   match(Set dst (AddI src1 (AndI src2 mask)));
11408   ins_cost(INSN_COST);
11409   format %{ "addw  $dst, $src1, $src2, uxth" %}
11410 
11411    ins_encode %{
11412      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
11413             as_Register($src2$$reg), ext::uxth);
11414    %}
11415   ins_pipe(ialu_reg_reg);
11416 %}
11417 
11418 instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
11419 %{
11420   match(Set dst (AddL src1 (AndL src2 mask)));
11421   ins_cost(INSN_COST);
11422   format %{ "add  $dst, $src1, $src2, uxtb" %}
11423 
11424    ins_encode %{
11425      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11426             as_Register($src2$$reg), ext::uxtb);
11427    %}
11428   ins_pipe(ialu_reg_reg);
11429 %}
11430 
11431 instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
11432 %{
11433   match(Set dst (AddL src1 (AndL src2 mask)));
11434   ins_cost(INSN_COST);
11435   format %{ "add  $dst, $src1, $src2, uxth" %}
11436 
11437    ins_encode %{
11438      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11439             as_Register($src2$$reg), ext::uxth);
11440    %}
11441   ins_pipe(ialu_reg_reg);
11442 %}
11443 
// Long add of a 32-bit-masked operand: AddL src1 (AndL src2 0xFFFFFFFF) folds
// to a single add with uxtw extension. (Auto-generated pattern -- see END marker.)
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
11456 
// Int subtract of an 8-bit-masked operand: SubI src1 (AndI src2 0xFF) folds to
// a single subw with uxtb extension. (Auto-generated pattern -- see END marker.)
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
11469 
// Int subtract of a 16-bit-masked operand: SubI src1 (AndI src2 0xFFFF) folds
// to a single subw with uxth extension. (Auto-generated pattern -- see END marker.)
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
11482 
// Long subtract of an 8-bit-masked operand: SubL src1 (AndL src2 0xFF) folds
// to a single sub with uxtb extension. (Auto-generated pattern -- see END marker.)
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
11495 
// Long subtract of a 16-bit-masked operand: SubL src1 (AndL src2 0xFFFF) folds
// to a single sub with uxth extension. (Auto-generated pattern -- see END marker.)
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
11508 
// Long subtract of a 32-bit-masked operand: SubL src1 (AndL src2 0xFFFFFFFF)
// folds to a single sub with uxtw extension. (Auto-generated pattern -- see END marker.)
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
11521 
11522 // END This section of the file is automatically generated. Do not edit --------------
11523 
11524 // ============================================================================
11525 // Floating Point Arithmetic Instructions
11526 
// Single-precision FP add: AddF -> fadds.
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11541 
// Double-precision FP add: AddD -> faddd.
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11556 
// Single-precision FP subtract: SubF -> fsubs.
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11571 
// Double-precision FP subtract: SubD -> fsubd.
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11586 
// Single-precision FP multiply: MulF -> fmuls.
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11601 
// Double-precision FP multiply: MulD -> fmuld.
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11616 
// We cannot use these fused mul w add/sub ops because they don't
// produce the same result as the equivalent separated ops
// (essentially they don't round the intermediate result). That's a
// shame. Leaving them here in case we can identify cases where it is
// legitimate to use them.
11622 
11623 
11624 // instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
11625 //   match(Set dst (AddF (MulF src1 src2) src3));
11626 
11627 //   format %{ "fmadds   $dst, $src1, $src2, $src3" %}
11628 
11629 //   ins_encode %{
11630 //     __ fmadds(as_FloatRegister($dst$$reg),
11631 //              as_FloatRegister($src1$$reg),
11632 //              as_FloatRegister($src2$$reg),
11633 //              as_FloatRegister($src3$$reg));
11634 //   %}
11635 
11636 //   ins_pipe(pipe_class_default);
11637 // %}
11638 
11639 // instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
11640 //   match(Set dst (AddD (MulD src1 src2) src3));
11641 
11642 //   format %{ "fmaddd   $dst, $src1, $src2, $src3" %}
11643 
11644 //   ins_encode %{
11645 //     __ fmaddd(as_FloatRegister($dst$$reg),
11646 //              as_FloatRegister($src1$$reg),
11647 //              as_FloatRegister($src2$$reg),
11648 //              as_FloatRegister($src3$$reg));
11649 //   %}
11650 
11651 //   ins_pipe(pipe_class_default);
11652 // %}
11653 
11654 // instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
11655 //   match(Set dst (AddF (MulF (NegF src1) src2) src3));
11656 //   match(Set dst (AddF (NegF (MulF src1 src2)) src3));
11657 
11658 //   format %{ "fmsubs   $dst, $src1, $src2, $src3" %}
11659 
11660 //   ins_encode %{
11661 //     __ fmsubs(as_FloatRegister($dst$$reg),
11662 //               as_FloatRegister($src1$$reg),
11663 //               as_FloatRegister($src2$$reg),
11664 //              as_FloatRegister($src3$$reg));
11665 //   %}
11666 
11667 //   ins_pipe(pipe_class_default);
11668 // %}
11669 
11670 // instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
11671 //   match(Set dst (AddD (MulD (NegD src1) src2) src3));
11672 //   match(Set dst (AddD (NegD (MulD src1 src2)) src3));
11673 
11674 //   format %{ "fmsubd   $dst, $src1, $src2, $src3" %}
11675 
11676 //   ins_encode %{
11677 //     __ fmsubd(as_FloatRegister($dst$$reg),
11678 //               as_FloatRegister($src1$$reg),
11679 //               as_FloatRegister($src2$$reg),
11680 //               as_FloatRegister($src3$$reg));
11681 //   %}
11682 
11683 //   ins_pipe(pipe_class_default);
11684 // %}
11685 
11686 // instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
11687 //   match(Set dst (SubF (MulF (NegF src1) src2) src3));
11688 //   match(Set dst (SubF (NegF (MulF src1 src2)) src3));
11689 
11690 //   format %{ "fnmadds  $dst, $src1, $src2, $src3" %}
11691 
11692 //   ins_encode %{
11693 //     __ fnmadds(as_FloatRegister($dst$$reg),
11694 //                as_FloatRegister($src1$$reg),
11695 //                as_FloatRegister($src2$$reg),
11696 //                as_FloatRegister($src3$$reg));
11697 //   %}
11698 
11699 //   ins_pipe(pipe_class_default);
11700 // %}
11701 
11702 // instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
11703 //   match(Set dst (SubD (MulD (NegD src1) src2) src3));
11704 //   match(Set dst (SubD (NegD (MulD src1 src2)) src3));
11705 
11706 //   format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}
11707 
11708 //   ins_encode %{
11709 //     __ fnmaddd(as_FloatRegister($dst$$reg),
11710 //                as_FloatRegister($src1$$reg),
11711 //                as_FloatRegister($src2$$reg),
11712 //                as_FloatRegister($src3$$reg));
11713 //   %}
11714 
11715 //   ins_pipe(pipe_class_default);
11716 // %}
11717 
11718 // instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
11719 //   match(Set dst (SubF (MulF src1 src2) src3));
11720 
11721 //   format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}
11722 
11723 //   ins_encode %{
11724 //     __ fnmsubs(as_FloatRegister($dst$$reg),
11725 //                as_FloatRegister($src1$$reg),
11726 //                as_FloatRegister($src2$$reg),
11727 //                as_FloatRegister($src3$$reg));
11728 //   %}
11729 
11730 //   ins_pipe(pipe_class_default);
11731 // %}
11732 
11733 // instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
11734 //   match(Set dst (SubD (MulD src1 src2) src3));
11735 
11736 //   format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}
11737 
11738 //   ins_encode %{
11739 //   // n.b. insn name should be fnmsubd
11740 //     __ fnmsub(as_FloatRegister($dst$$reg),
11741 //                as_FloatRegister($src1$$reg),
11742 //                as_FloatRegister($src2$$reg),
11743 //                as_FloatRegister($src3$$reg));
11744 //   %}
11745 
11746 //   ins_pipe(pipe_class_default);
11747 // %}
11748 
11749 
// Single-precision FP divide: DivF -> fdivs. Higher cost reflects divide latency.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11764 
// Double-precision FP divide: DivD -> fdivd. Higher cost reflects divide latency.
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11779 
// Single-precision FP negate: NegF -> fnegs.
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Format corrected to "fnegs": the encoding emits fnegs, and the sibling
  // negD block prints the analogous "fnegd". The old "fneg" text did not
  // name the emitted instruction.
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11793 
// Double-precision FP negate: NegD -> fnegd.
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11807 
// Single-precision FP absolute value: AbsF -> fabss.
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11820 
// Double-precision FP absolute value: AbsD -> fabsd.
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11833 
// Double-precision square root: SqrtD -> fsqrtd.
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11846 
// Single-precision square root. There is no SqrtF ideal node, so this matches
// the widen-sqrt-narrow idiom (ConvD2F (SqrtD (ConvF2D src))) and collapses it
// to a single fsqrts.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11859 
11860 // ============================================================================
11861 // Logical Instructions
11862 
11863 // Integer Logical Instructions
11864 
11865 // And Instructions
11866 
11867 
// Int bitwise AND, register-register: AndI -> andw.
// NOTE(review): cr appears in the operand list but there is no effect(KILL cr)
// and andw does not touch flags -- presumably an auto-generation artifact; confirm.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
11882 
// Int bitwise AND with a logical immediate: AndI -> andw (immediate form).
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Format corrected to "andw": the encoding emits the non-flag-setting andw,
  // not the flag-setting "andsw" the old format string claimed.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
11897 
11898 // Or Instructions
11899 
// Int bitwise OR, register-register: OrI -> orrw.
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
11914 
// Int bitwise OR with a logical immediate: OrI -> orrw (immediate form).
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
11929 
11930 // Xor Instructions
11931 
// Int bitwise XOR, register-register: XorI -> eorw.
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
11946 
// Int bitwise XOR with a logical immediate: XorI -> eorw (immediate form).
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
11961 
11962 // Long Logical Instructions
11963 // TODO
11964 
// Long bitwise AND, register-register: AndL -> andr (64-bit form).
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Format comment corrected from "# int" to "# long": this is the 64-bit
  // operation (copy-paste slip from the int variants).
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
11979 
// Long bitwise AND with a logical immediate: AndL -> andr (immediate form).
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Format comment corrected from "# int" to "# long": this is the 64-bit
  // operation (copy-paste slip from the int variants).
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
11994 
11995 // Or Instructions
11996 
// Long bitwise OR, register-register: OrL -> orr (64-bit form).
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  // Format comment corrected from "# int" to "# long": this is the 64-bit
  // operation (copy-paste slip from the int variants).
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12011 
// Long bitwise OR with a logical immediate: OrL -> orr (immediate form).
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  // Format comment corrected from "# int" to "# long": this is the 64-bit
  // operation (copy-paste slip from the int variants).
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12026 
12027 // Xor Instructions
12028 
// Long bitwise XOR, register-register: XorL -> eor (64-bit form).
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  // Format comment corrected from "# int" to "# long": this is the 64-bit
  // operation (copy-paste slip from the int variants).
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12043 
// Long bitwise XOR with a logical immediate: XorL -> eor (immediate form).
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  // Format comment corrected from "# int" to "# long": this is the 64-bit
  // operation (copy-paste slip from the int variants).
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12058 
// ConvI2L: sign-extend int to long via sbfm with bit range 0..31 (i.e. sxtw).
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
12070 
12071 // this pattern occurs in bigmath arithmetic
// Unsigned int-to-long: (ConvI2L src) & 0xFFFFFFFF collapses to a single
// zero-extension via ubfm 0..31. This pattern occurs in bigmath arithmetic.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
12084 
// ConvL2I: truncate long to int with a 32-bit register move (movw copies the
// low word of src into dst).
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
12097 
// Conv2B on an int: dst = (src != 0) ? 1 : 0, via compare-with-zero then
// conditional set. Clobbers the flags register.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
12115 
// Conv2B on a pointer: dst = (src != NULL) ? 1 : 0, via 64-bit compare-with-zero
// then conditional set. Clobbers the flags register.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
12133 
// ConvD2F: narrow double to float via fcvtd.
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12146 
// ConvF2D: widen float to double via fcvts.
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12159 
// ConvF2I: float to signed 32-bit int via fcvtzsw (convert-to-signed,
// round toward zero).
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12172 
// ConvF2L: float to signed 64-bit long via fcvtzs (convert-to-signed,
// round toward zero).
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12185 
// ConvI2F: signed 32-bit int to float via scvtfws.
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12198 
// ConvL2F: signed 64-bit long to float via scvtfs.
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12211 
// ConvD2I: double to signed 32-bit int via fcvtzdw (convert-to-signed,
// round toward zero).
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12224 
// ConvD2L: double to signed 64-bit long via fcvtzd (convert-to-signed,
// round toward zero).
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12237 
// ConvI2D: signed 32-bit int to double via scvtfwd.
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12250 
// ConvL2D: signed 64-bit long to double via scvtfd.
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12263 
12264 // stack <-> reg and reg <-> reg shuffles with no conversion
12265 
// MoveF2I, stack slot -> GP register: raw 32-bit bit copy via ldrw from the
// stack slot (no value conversion).
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}
12283 
// MoveI2F, stack slot -> FP register: raw 32-bit bit copy via ldrs from the
// stack slot (no value conversion).
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
12301 
// MoveD2L, stack slot -> GP register: raw 64-bit bit copy via ldr from the
// stack slot (no value conversion).
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}
12319 
// MoveL2D, stack slot -> FP register: raw 64-bit bit copy via ldrd from the
// stack slot (no value conversion).
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
12337 
// MoveF2I, FP register -> stack slot: raw 32-bit bit copy via strs to the
// stack slot (no value conversion).
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
12355 
// MoveI2F, GP register -> stack slot: raw 32-bit bit copy via strw to the
// stack slot (no value conversion).
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
12373 
// MoveD2L, FP register -> stack slot: raw 64-bit bit copy via strd to the
// stack slot (no value conversion).
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Format operand order corrected to "$src, $dst": the encoding stores src to
  // the dst stack slot, and every sibling reg->stack block prints src first.
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
12391 
// MoveL2D, GP register -> stack slot: raw 64-bit bit copy via str to the
// stack slot (no value conversion).
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
12409 
// MoveF2I, FP register -> GP register: direct 32-bit bit copy via fmovs.
// NOTE(review): ins_pipe is pipe_class_memory although no memory is touched --
// presumably inherited from the stack variants; confirm intended.
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_memory);

%}
12427 
// MoveI2F, GP register -> FP register: direct 32-bit bit copy via fmovs.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(pipe_class_memory);

%}
12445 
// MoveD2L, FP register -> GP register: direct 64-bit bit copy via fmovd.
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_memory);

%}
12463 
// MoveL2D, GP register -> FP register: direct 64-bit bit copy via fmovd.
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(pipe_class_memory);

%}
12481 
12482 // ============================================================================
12483 // clearing of an array
12484 
// ClearArray: zero cnt words starting at base. Encoding is supplied by the
// aarch64_enc_clear_array_reg_reg enc_class; both fixed input registers
// (cnt in r11, base in r10) are clobbered, hence USE_KILL.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode(aarch64_enc_clear_array_reg_reg(cnt, base));

  ins_pipe(pipe_class_memory);
%}
12497 
12498 // ============================================================================
12499 // Overflow Math Instructions
12500 
// OverflowAddI, reg-reg: set flags as for op1 + op2 via cmnw; the flags
// register is the instruction's result.
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
12513 
// OverflowAddI, reg-imm: set flags as for op1 + constant via cmnw.
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
12526 
// OverflowAddL, reg-reg: set flags as for op1 + op2 via 64-bit cmn.
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
12539 
// OverflowAddL, reg-imm: set flags as for op1 + constant via 64-bit cmn.
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
12552 
// OverflowSubI, reg-reg: set flags as for op1 - op2 via cmpw.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
12565 
// As overflowSubI_reg_reg but with an add/sub-encodable immediate operand.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
12578 
// Set flags for a long subtract overflow check (64-bit cmp; V set on overflow).
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
12591 
// As overflowSubL_reg_reg but with an add/sub-encodable immediate operand.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
12604 
// Set flags for an int negation overflow check (0 - $op1).
// V is set only for op1 == min_jint.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
12617 
// Set flags for a long negation overflow check (0 - $op1).
// V is set only for op1 == min_jlong.
// n.b. the zero constant in (OverflowSubL zero op1) is a long, so the
// matching operand must be immL0: an immI0 operand matches ConI(0), not
// ConL(0), and would leave this rule unmatchable (dead).
instruct overflowNegL_reg(rFlagsReg cr, immL0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
12630 
// Set flags for an int multiply overflow check.  smull produces the full
// 64-bit product; it overflows int iff the product differs from the sign
// extension of its own low 32 bits.  The movw/cselw/cmpw tail converts
// that NE/EQ result into the V flag expected by the overflow user.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
12651 
// Fused int multiply-overflow check and branch.  Avoids the flag-conversion
// tail of overflowMulI_reg by branching directly on the NE/EQ result of the
// sign-extension compare (VS maps to NE, VC maps to EQ).
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
12673 
// Set flags for a long multiply overflow check.  mul/smulh produce the full
// 128-bit product; it overflows long iff the high 64 bits (smulh) differ
// from the sign extension of the low 64 bits, i.e. low ASR #63.  (ASR #31
// would be the int check and misdetects 64-bit overflow.)  The tail converts
// the NE/EQ result into the V flag expected by the overflow user.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
12696 
// Fused long multiply-overflow check and branch.  As in overflowMulL_reg,
// overflow is detected by comparing the smulh high word against the sign
// extension of the low word (ASR #63 — not #31, which is the int check);
// the branch maps VS to NE and VC to EQ directly, skipping the flag tail.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
12720 
12721 // ============================================================================
12722 // Compare Instructions
12723 
// Signed int compare, register-register.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
12737 
// Signed int compare against constant zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
12751 
// Signed int compare against an add/sub-encodable immediate (single insn).
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
12765 
// Signed int compare against an arbitrary immediate; costed higher because
// the constant may need to be materialized first.
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
12779 
12780 // Unsigned compare Instructions; really, same as signed compare
12781 // except it should only be used to feed an If or a CMovI which takes a
12782 // cmpOpU.
12783 
// Unsigned int compare, register-register; same cmpw encoding as the signed
// form, but defines rFlagsRegU so only unsigned users (cmpOpU) consume it.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
12797 
// Unsigned int compare against constant zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
12811 
// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
12825 
// Unsigned int compare against an arbitrary immediate; costed higher because
// the constant may need to be materialized first.
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
12839 
// Signed long compare, register-register.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
12853 
// Signed long compare against constant zero.
// n.b. the zero constant in (CmpL op1 zero) is a long, so the matching
// operand must be immL0 (as in cmpL_imm0_branch): an immI0 operand matches
// ConI(0), not ConL(0), leaving this rule unmatchable (dead).
instruct compL_reg_immI0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
12867 
// Signed long compare against an add/sub-encodable immediate (single insn).
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
12881 
// Signed long compare against an arbitrary immediate; costed higher because
// the constant may need to be materialized first.
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
12895 
// Pointer compare; pointers compare unsigned, hence rFlagsRegU.
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
12909 
// Compressed-pointer (narrow oop) compare; unsigned, hence rFlagsRegU.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
12923 
// Pointer null test (compare against constant NULL).
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}
12937 
// Compressed-pointer null test (compare against constant narrow NULL).
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
12951 
12952 // FP comparisons
12953 //
12954 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
12955 // using normal cmpOp. See declaration of rFlagsReg for details.
12956 
// Float compare, register-register (fcmps sets the normal flags register;
// see the section comment above regarding CmpF and cmpOp).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
12970 
// Float compare against constant 0.0 using the immediate-zero form of fcmp.
// n.b. the literal is written 0.0: the Java-style 'D' suffix is not valid
// standard C++ in the ins_encode body and breaks non-gcc compilers.
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
12984 // FROM HERE
12985 
// Double compare, register-register.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
12999 
// Double compare against constant 0.0 using the immediate-zero form of fcmp.
// n.b. the literal is written 0.0: the Java-style 'D' suffix is not valid
// standard C++ in the ins_encode body and breaks non-gcc compilers.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
13013 
// Three-way float compare (CmpF3): $dst := -1 / 0 / +1, with -1 for
// less-than or unordered.  The previously declared-but-unused 'Label done'
// (dead code) has been removed, and the format text's unbalanced
// parentheses fixed.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
13041 
// Three-way double compare (CmpD3): $dst := -1 / 0 / +1, with -1 for
// less-than or unordered.  The previously declared-but-unused 'Label done'
// (dead code) has been removed, and the format text's unbalanced
// parentheses fixed.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
13068 
// Three-way float compare against constant 0.0.  Fixes applied: removed the
// declared-but-unused 'Label done' (dead code); balanced the format text's
// parentheses; replaced the Java-style '0.0D' literal (invalid standard C++)
// with 0.0.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
13095 
// Three-way double compare against constant 0.0.  Fixes applied: removed the
// declared-but-unused 'Label done' (dead code); balanced the format text's
// parentheses; replaced the Java-style '0.0D' literal (invalid standard C++)
// with 0.0.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
13121 
// CmpLTMask: $dst := ($p < $q) ? -1 : 0 (all-ones mask on less-than).
// csetw produces 0/1 from the LT condition; subtracting from zr turns 1
// into -1.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13142 
// CmpLTMask against zero: an arithmetic shift right by 31 replicates the
// sign bit, yielding -1 for negative $src and 0 otherwise in one insn.
// n.b. cr is killed per the operand list even though asrw itself does not
// write flags — TODO confirm whether the KILL is required here.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
13158 
13159 // ============================================================================
13160 // Max and Min
13161 
// Signed int minimum: compare then conditionally select $src1 on LT.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 lt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
13186 // FROM HERE
13187 
// Signed int maximum: compare then conditionally select $src1 on GT.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 gt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
13212 
13213 // ============================================================================
13214 // Branch Instructions
13215 
13216 // Direct Branch.
// Unconditional direct branch (Goto).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
13230 
13231 // Conditional Near Branch
// Conditional branch on signed condition codes.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
13251 
13252 // Conditional Near Branch Unsigned
// Conditional branch on unsigned condition codes (consumes rFlagsRegU).
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
13272 
13273 // Make use of CBZ and CBNZ.  These instructions, as well as being
13274 // shorter than (cmp; branch), have the additional benefit of not
13275 // killing the flags.
13276 
// Fused int compare-with-zero and branch using CBZW/CBNZW (see section
// comment above: shorter than cmp+branch and does not kill flags).
// Only eq/ne tests qualify, enforced by the predicate.
instruct cmpI_imm0_branch(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13295 
// Fused long compare-with-zero and branch using CBZ/CBNZ (flags untouched).
// Only eq/ne tests qualify, enforced by the predicate.
instruct cmpL_imm0_branch(cmpOp cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13314 
// Fused pointer null-check and branch using CBZ/CBNZ (flags untouched).
// Only eq/ne tests qualify, enforced by the predicate.
instruct cmpP_imm0_branch(cmpOp cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13333 
13334 // Conditional Far Branch
13335 // Conditional Far Branch Unsigned
13336 // TODO: fixme
13337 
13338 // counted loop end branch near
// Counted-loop back-branch on signed condition codes; same encoding as
// branchCon, matched separately for CountedLoopEnd.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
13354 
13355 // counted loop end branch near Unsigned
// Counted-loop back-branch on unsigned condition codes.
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
13371 
13372 // counted loop end branch far
13373 // counted loop end branch far unsigned
13374 // TODO: fixme
13375 
13376 // ============================================================================
13377 // inlined locking and unlocking
13378 
// Inline fast-path monitor enter; sets flags for the caller to test the
// outcome.  $tmp and $tmp2 are scratch (killed).
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
13393 
// Inline fast-path monitor exit; sets flags for the caller to test the
// outcome.  $tmp and $tmp2 are scratch (killed).
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
13406 
13407 
13408 // ============================================================================
13409 // Safepoint Instructions
13410 
13411 // TODO
13412 // provide a near and far version of this code
13413 
// Safepoint poll: load from the polling page with a poll_type relocation
// so the runtime can identify this site when a safepoint is triggered.
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
13426 
13427 
13428 // ============================================================================
13429 // Procedure Call/Return Instructions
13430 
13431 // Call Java Static Instruction
13432 
// Direct call to a statically-bound Java method, followed by the standard
// call epilog.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
13448 
13449 // TO HERE
13450 
13451 // Call Java Dynamic Instruction
// Dynamically-dispatched Java call (inline-cache based), followed by the
// standard call epilog.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
13467 
13468 // Call Runtime Instruction
13469 
// Call from compiled Java code into the VM runtime.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
13484 
13485 // Call Runtime Instruction
13486 
// Call a runtime leaf routine (no Java-visible side effects, no safepoint);
// uses the same java_to_runtime encoding as CallRuntimeDirect.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
13501 
13502 // Call Runtime Instruction
13503 
// Call a runtime leaf routine that does not touch FP state; same encoding
// as CallLeafDirect.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
13518 
13519 // Tail Call; Jump from runtime stub to Java code.
13520 // Also known as an 'interprocedural jump'.
13521 // Target of jump will eventually return to caller.
13522 // TailJump below removes the return address.
// Indirect tail call: jump (not call) to $jump_target with the method oop
// held in the inline-cache register for the callee.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
13535 
// Indirect tail jump used for exception dispatch: the exception oop is
// pinned to r0 (see the TailJump note in the comment above TailCalljmpInd).
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
13548 
13549 // Create exception oop: created by stack-crawling runtime code.
13550 // Created exception is now available to this handler, and is setup
13551 // just prior to jumping to this handler. No code emitted.
13552 // TODO check
13553 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Materialize the exception oop already placed in r0 by the runtime's
// stack-crawling code; emits no instructions (size 0).
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
13566 
13567 // Rethrow exception: The exception oop will come in the first
13568 // argument position. Then JUMP (not call) to the rethrow stub code.
// Rethrow: jump (not call) to the rethrow stub; the exception oop arrives
// in the first argument register (see comment above).
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
13579 
13580 
13581 // Return Instruction
13582 // epilog node loads ret address into lr as part of frame pop
// Method return; the epilog has already restored lr (see comment above).
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
13593 
13594 // Die now.
// Halt: emit a breakpoint trap for paths that must never execute.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // TODO
    // implement proper trap call here
    __ brk(999);
  %}

  ins_pipe(pipe_class_default);
%}
13609 
13610 // ============================================================================
13611 // Partial Subtype Check
13612 //
13613 // superklass array for an instance of the superklass.  Set a hidden
13614 // internal cache on a hit (cache is checked with exposed code in
13615 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
13616 // encoding ALSO sets flags.
13617 
// Partial subtype check with fixed input/output registers (r4 sub, r0 super,
// r2 temp, r5 result).  opcode(0x1) requests zeroing of the result register
// on a hit; flags are set as a side effect (see section comment above).
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
13632 
// Variant matched when the check result is immediately compared against
// zero: only the flags are needed, so the result register is a kill.
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // same cost as the generic version above
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
13647 
// String.compareTo intrinsic: delegates to MacroAssembler::string_compare.
// All inputs are use-killed; result lands in r0.
instruct string_compare(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
13662 
// String.indexOf intrinsic, variable substring length.
instruct string_indexof(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      // -1: presumably tells the stub the substring length
                      // is in cnt2 rather than a compile-time constant
                      // (contrast string_indexof_con below) — TODO confirm
                      // against MacroAssembler::string_indexof.
                      -1, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
13680 
// String.indexOf intrinsic, substring length known at compile time
// (immI_le_4 restricts the constant to small values).  The runtime
// count register is replaced by zr and the constant passed instead.
instruct string_indexof_con(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegI tmp1, iRegI tmp2,
                 iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
13700 
// String.equals intrinsic: delegates to MacroAssembler::string_equals.
instruct string_equals(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, iRegP_R10 tmp, rFlagsReg cr)
%{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(KILL tmp, USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result    // KILL $tmp" %}
  ins_encode %{
    __ string_equals($str1$$Register, $str2$$Register,
                      $cnt$$Register, $result$$Register,
                      $tmp$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
13715 
// Arrays.equals intrinsic; delegates to char_arrays_equals, so this rule
// presumably only applies to char[] comparisons — confirm how AryEq is
// restricted at match time.
instruct array_equals(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ char_arrays_equals($ary1$$Register, $ary2$$Register,
                          $result$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
13729 
// encode char[] to byte[] in ISO_8859_1
// Uses four D registers (v0-v3) as SIMD scratch; all killed.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
13748 
13749 // ============================================================================
13750 // This name is KNOWN by the ADLC and cannot be changed.
13751 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
13752 // for this guy.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  // Zero-length: the thread pointer already lives in the dedicated
  // thread register (thread_RegP), so no code is needed.
  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
13767 
13768 // ====================VECTOR INSTRUCTIONS=====================================
13769 
// Load vector (32 bits)
instruct loadV4(vecD dst, vmem mem)
%{
  // Applies only when the ideal LoadVector reads exactly 4 bytes.
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(pipe_class_memory);
%}
13780 
// Load vector (64 bits)
instruct loadV8(vecD dst, vmem mem)
%{
  // Applies only when the ideal LoadVector reads exactly 8 bytes.
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(pipe_class_memory);
%}
13791 
// Load Vector (128 bits)
instruct loadV16(vecX dst, vmem mem)
%{
  // Applies only when the ideal LoadVector reads exactly 16 bytes.
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(pipe_class_memory);
%}
13802 
// Store Vector (32 bits)
instruct storeV4(vecD src, vmem mem)
%{
  // Applies only when the ideal StoreVector writes exactly 4 bytes.
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(pipe_class_memory);
%}
13813 
// Store Vector (64 bits)
instruct storeV8(vecD src, vmem mem)
%{
  // Applies only when the ideal StoreVector writes exactly 8 bytes.
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(pipe_class_memory);
%}
13824 
// Store Vector (128 bits)
instruct storeV16(vecX src, vmem mem)
%{
  // Applies only when the ideal StoreVector writes exactly 16 bytes.
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(pipe_class_memory);
%}
13835 
// Broadcast a GP register byte to all 8 byte lanes of a D register
// (also matches 4-lane vectors, which occupy the low half).
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13848 
// Broadcast a GP register byte to all 16 byte lanes of a Q register.
instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13860 
// Broadcast an immediate byte; the constant is masked to its low 8 bits.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(pipe_class_default);
%}
13873 
// Broadcast an immediate byte to 16 lanes; constant masked to 8 bits.
instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(pipe_class_default);
%}
13885 
// Broadcast a GP register short to 4 halfword lanes (also matches 2-lane).
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13898 
// Broadcast a GP register short to all 8 halfword lanes of a Q register.
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13910 
// Broadcast an immediate short; constant masked to its low 16 bits.
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(pipe_class_default);
%}
13923 
// Broadcast an immediate short to 8 lanes; constant masked to 16 bits.
instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(pipe_class_default);
%}
13935 
// Broadcast a GP register int to both 32-bit lanes of a D register.
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13947 
// Broadcast a GP register int to all four 32-bit lanes of a Q register.
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13959 
// Broadcast an immediate int to both 32-bit lanes of a D register.
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
13971 
// Broadcast an immediate int to all four 32-bit lanes of a Q register.
instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
13983 
// Broadcast a GP register long to both 64-bit lanes of a Q register.
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13995 
// Zero a 128-bit register.  Matches ReplicateI (not ReplicateL) —
// presumably the ideal graph canonicalizes a long-zero replicate down
// to an int form; TODO confirm.  NOTE(review): the format string shows
// "movi ... vector(4I)" but the encoding actually emits an eor of the
// register with itself, which produces the same all-zero result.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14009 
// Broadcast an FP register float to both 32-bit lanes of a D register.
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14022 
// Broadcast an FP register float to all four lanes of a Q register.
instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14035 
// Broadcast an FP register double to both 64-bit lanes of a Q register.
instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14048 
14049 // ====================REDUCTION ARITHMETIC====================================
14050 
// Int add reduction over a 2-lane vector:
// dst = src1 + src2[0] + src2[1], via two lane extracts (umov).
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp, iRegI tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $dst, $src1, $tmp\n\t"
            "addw  $dst, $dst, $tmp2\t add reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
14069 
// Int add reduction over a 4-lane vector: sum all lanes with a single
// across-vector ADDV, extract the scalar, then add src1.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
14087 
// Int mul reduction over a 2-lane vector:
// dst = src1 * src2[0] * src2[1].  dst is a TEMP because it is written
// before the last input read would otherwise allow.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i\n\t"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
14106 
// Int mul reduction over a 4-lane vector: fold the high 64 bits onto the
// low half (ins + 2S vector mul pairs lanes {0*2, 1*3}), then finish with
// two scalar multiplies against src1.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i\n\t"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
14131 
// Float add reduction over a 2-lane vector, performed with scalar fadds
// in lane order — presumably to keep Java's strict left-to-right FP
// evaluation order (FP add is not associative).
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14151 
// Float add reduction over a 4-lane vector: each lane is moved to tmp's
// lane 0 (ins) and accumulated with a scalar fadds, strictly in lane
// order — presumably to preserve Java FP semantics.
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14183 
// Float mul reduction over a 2-lane vector, scalar fmuls in lane order.
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14203 
// Float mul reduction over a 4-lane vector, scalar fmuls in lane order.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14235 
// Double add reduction over a 2-lane vector, scalar faddd in lane order.
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14255 
// Double mul reduction over a 2-lane vector, scalar fmuld in lane order.
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14275 
14276 // ====================VECTOR ARITHMETIC=======================================
14277 
14278 // --------------------------------- ADD --------------------------------------
14279 
// Vector add, 8 (or 4, low half) byte lanes in a D register.
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14294 
// Vector add, 16 byte lanes in a Q register.
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14308 
// Vector add, 4 (or 2, low half) halfword lanes in a D register.
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14323 
// Vector add, 8 halfword lanes in a Q register.
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14337 
// Vector add, 2 int lanes in a D register.
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14351 
// Vector add, 4 int lanes in a Q register.
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14365 
// Vector add, 2 long lanes in a Q register.
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14379 
// Vector add, 2 float lanes in a D register.
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14393 
// Vector add, 4 float lanes in a Q register.
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14407 
// Vector add, 2 double lanes in a Q register.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  // Lane-count guard added for consistency with every sibling vector
  // rule (vadd2L, vsub2L, replicate2D, ...).  An AddVD held in a vecX
  // can only have 2 lanes, so this does not change what is matched;
  // it keeps the rules uniform and robust against future vector sizes.
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14420 
14421 // --------------------------------- SUB --------------------------------------
14422 
// Vector subtract, 8 (or 4, low half) byte lanes in a D register.
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14437 
// Vector subtract, 16 byte lanes in a Q register.
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14451 
// Vector subtract, 4 (or 2, low half) halfword lanes in a D register.
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14466 
14467 instruct vsub8S(vecX dst, vecX src1, vecX src2)
14468 %{
14469   predicate(n->as_Vector()->length() == 8);
14470   match(Set dst (SubVS src1 src2));
14471   ins_cost(INSN_COST);
14472   format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
14473   ins_encode %{
14474     __ subv(as_FloatRegister($dst$$reg), __ T8H,
14475             as_FloatRegister($src1$$reg),
14476             as_FloatRegister($src2$$reg));
14477   %}
14478   ins_pipe(pipe_class_default);
14479 %}
14480 
14481 instruct vsub2I(vecD dst, vecD src1, vecD src2)
14482 %{
14483   predicate(n->as_Vector()->length() == 2);
14484   match(Set dst (SubVI src1 src2));
14485   ins_cost(INSN_COST);
14486   format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
14487   ins_encode %{
14488     __ subv(as_FloatRegister($dst$$reg), __ T2S,
14489             as_FloatRegister($src1$$reg),
14490             as_FloatRegister($src2$$reg));
14491   %}
14492   ins_pipe(pipe_class_default);
14493 %}
14494 
14495 instruct vsub4I(vecX dst, vecX src1, vecX src2)
14496 %{
14497   predicate(n->as_Vector()->length() == 4);
14498   match(Set dst (SubVI src1 src2));
14499   ins_cost(INSN_COST);
14500   format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
14501   ins_encode %{
14502     __ subv(as_FloatRegister($dst$$reg), __ T4S,
14503             as_FloatRegister($src1$$reg),
14504             as_FloatRegister($src2$$reg));
14505   %}
14506   ins_pipe(pipe_class_default);
14507 %}
14508 
14509 instruct vsub2L(vecX dst, vecX src1, vecX src2)
14510 %{
14511   predicate(n->as_Vector()->length() == 2);
14512   match(Set dst (SubVL src1 src2));
14513   ins_cost(INSN_COST);
14514   format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
14515   ins_encode %{
14516     __ subv(as_FloatRegister($dst$$reg), __ T2D,
14517             as_FloatRegister($src1$$reg),
14518             as_FloatRegister($src2$$reg));
14519   %}
14520   ins_pipe(pipe_class_default);
14521 %}
14522 
14523 instruct vsub2F(vecD dst, vecD src1, vecD src2)
14524 %{
14525   predicate(n->as_Vector()->length() == 2);
14526   match(Set dst (SubVF src1 src2));
14527   ins_cost(INSN_COST);
14528   format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
14529   ins_encode %{
14530     __ fsub(as_FloatRegister($dst$$reg), __ T2S,
14531             as_FloatRegister($src1$$reg),
14532             as_FloatRegister($src2$$reg));
14533   %}
14534   ins_pipe(pipe_class_default);
14535 %}
14536 
14537 instruct vsub4F(vecX dst, vecX src1, vecX src2)
14538 %{
14539   predicate(n->as_Vector()->length() == 4);
14540   match(Set dst (SubVF src1 src2));
14541   ins_cost(INSN_COST);
14542   format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
14543   ins_encode %{
14544     __ fsub(as_FloatRegister($dst$$reg), __ T4S,
14545             as_FloatRegister($src1$$reg),
14546             as_FloatRegister($src2$$reg));
14547   %}
14548   ins_pipe(pipe_class_default);
14549 %}
14550 
14551 instruct vsub2D(vecX dst, vecX src1, vecX src2)
14552 %{
14553   predicate(n->as_Vector()->length() == 2);
14554   match(Set dst (SubVD src1 src2));
14555   ins_cost(INSN_COST);
14556   format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
14557   ins_encode %{
14558     __ fsub(as_FloatRegister($dst$$reg), __ T2D,
14559             as_FloatRegister($src1$$reg),
14560             as_FloatRegister($src2$$reg));
14561   %}
14562   ins_pipe(pipe_class_default);
14563 %}
14564 
14565 // --------------------------------- MUL --------------------------------------
14566 
// Integer lanes use SIMD MUL (assembler helper mulv); FP lanes use FMUL.
// Note: there is no MulVL (2L) rule here -- presumably because the ISA
// lacks a 64-bit-element integer multiply; verify before adding one.

// 2 or 4 short lanes in a vecD register.
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// 8 short lanes in a vecX register.
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// 2 int lanes in a vecD register.
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// 4 int lanes in a vecX register.
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// 2 float lanes in a vecD register.
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// 4 float lanes in a vecX register.
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// 2 double lanes in a vecX register.
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14665 
14666 // --------------------------------- DIV --------------------------------------
14667 
// Only floating-point division is provided (FDIV); there are no DivVI/DivVL
// rules in this file.

// 2 float lanes in a vecD register.
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// 4 float lanes in a vecX register.
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// 2 double lanes in a vecX register.
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14709 
14710 // --------------------------------- AND --------------------------------------
14711 
// Bitwise AND is type-agnostic, so these rules match on vector width in
// bytes rather than on lane count/type.  The assembler helper is andr.

// Up to 8 bytes in a vecD register.
instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// 16 bytes in a vecX register.
instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14740 
14741 // --------------------------------- OR ---------------------------------------
14742 
// Bitwise OR, up to 8 bytes in a vecD register (width-in-bytes predicate
// because OR is lane-type agnostic).
// Fix: the format string previously printed "and" although this rule
// matches OrV and emits ORR -- it now matches the emitted instruction and
// the vor16B rule below.  (format text affects only debug/PrintOptoAssembly
// output, not code generation.)
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14757 
// Bitwise OR, 16 bytes in a vecX register.
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14771 
14772 // --------------------------------- XOR --------------------------------------
14773 
// Bitwise XOR; the emitted mnemonic is EOR (the "xor" in the format string
// is free-form debug text).

// Up to 8 bytes in a vecD register.
instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// 16 bytes in a vecX register.
instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14802 
14803 // ------------------------------ Shift ---------------------------------------
14804 
// Materialize a variable shift count: replicate the GPR count into every
// byte lane of a vecX register for use by the variable-shift (SSHL/USHL)
// rules, which take their per-lane count from each element.
instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (LShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
// (SSHL/USHL shift right when the count is negative), hence the extra NEG.
instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
    __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14824 
// Byte-lane shifts.  Variable-shift rules use SSHL/USHL with a per-lane
// count register (right shifts arrive pre-negated via vshiftcntR, so one
// SSHL rule serves both LShiftVB and RShiftVB).  Immediate rules mask the
// JVM shift constant with 31 and special-case counts >= 8 (lane width):
// logical shifts produce zero (self-EOR), arithmetic shifts clamp to 7.
// NOTE(review): sshr/ushr are passed -sh & 7 rather than sh -- this
// presumably matches the assembler's immediate-shift field encoding
// (immh:immb holds 2*esize - shift); confirm against assembler_aarch64
// before changing, including the sh == 0 case.

// Variable shift left/arithmetic-right, up to 8 byte lanes (vecD).
instruct vsll8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable shift left/arithmetic-right, 16 byte lanes (vecX).
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable logical right shift, up to 8 byte lanes.
instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable logical right shift, 16 byte lanes.
instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate shift left; count >= 8 zeroes the result.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate shift left, 16 byte lanes; count >= 8 zeroes the result.
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate arithmetic right shift; count clamped to 7 (sign fill).
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate arithmetic right shift, 16 byte lanes; count clamped to 7.
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate logical right shift; count >= 8 zeroes the result.
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate logical right shift, 16 byte lanes; count >= 8 zeroes the result.
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(pipe_class_default);
%}
14989 
// Short (16-bit) lane shifts; same structure as the byte-lane rules:
// one SSHL rule serves LShiftVS and RShiftVS (right-shift counts arrive
// negated via vshiftcntR), USHL handles URShiftVS, and immediate rules
// mask the constant with 31 then special-case counts >= 16 (zero for
// logical shifts, clamp to 15 for arithmetic).  NOTE(review): sshr/ushr
// are passed -sh & 15, presumably the assembler's immh:immb encoding
// convention -- confirm against assembler_aarch64 before changing.

// Variable shift left/arithmetic-right, 2 or 4 short lanes (vecD).
instruct vsll4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable shift left/arithmetic-right, 8 short lanes (vecX).
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable logical right shift, 2 or 4 short lanes.
instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable logical right shift, 8 short lanes.
instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate shift left; count >= 16 zeroes the result.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate shift left, 8 short lanes; count >= 16 zeroes the result.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate arithmetic right shift; count clamped to 15 (sign fill).
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) sh = 15;
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate arithmetic right shift, 8 short lanes; count clamped to 15.
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) sh = 15;
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate logical right shift; count >= 16 zeroes the result.
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate logical right shift, 8 short lanes; count >= 16 zeroes the result.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(pipe_class_default);
%}
15154 
// Int (32-bit) lane shifts.  The JVM-masked count (& 31) can never reach
// the 32-bit lane width, so no zero/clamp special case is needed here.
// NOTE(review): sshr/ushr take the negated-and-masked count, presumably
// the assembler's immh:immb encoding convention -- confirm against
// assembler_aarch64 before changing.

// Variable shift left/arithmetic-right, 2 int lanes (vecD).
instruct vsll2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable shift left/arithmetic-right, 4 int lanes (vecX).
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable logical right shift, 2 int lanes.
instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable logical right shift, 4 int lanes.
instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate shift left, 2 int lanes.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate shift left, 4 int lanes.
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate arithmetic right shift, 2 int lanes.
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate arithmetic right shift, 4 int lanes.
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate logical right shift, 2 int lanes.
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate logical right shift, 4 int lanes.
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}
15286 
15287 instruct vsll2L(vecX dst, vecX src, vecX shift) %{
15288   predicate(n->as_Vector()->length() == 2);
15289   match(Set dst (LShiftVL src shift));
15290   match(Set dst (RShiftVL src shift));
15291   ins_cost(INSN_COST);
15292   format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
15293   ins_encode %{
15294     __ sshl(as_FloatRegister($dst$$reg), __ T2D,
15295             as_FloatRegister($src$$reg),
15296             as_FloatRegister($shift$$reg));
15297   %}
15298   ins_pipe(pipe_class_default);
15299 %}
15300 
15301 instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
15302   predicate(n->as_Vector()->length() == 2);
15303   match(Set dst (URShiftVL src shift));
15304   ins_cost(INSN_COST);
15305   format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
15306   ins_encode %{
15307     __ ushl(as_FloatRegister($dst$$reg), __ T2D,
15308             as_FloatRegister($src$$reg),
15309             as_FloatRegister($shift$$reg));
15310   %}
15311   ins_pipe(pipe_class_default);
15312 %}
15313 
15314 instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
15315   predicate(n->as_Vector()->length() == 2);
15316   match(Set dst (LShiftVL src shift));
15317   ins_cost(INSN_COST);
15318   format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
15319   ins_encode %{
15320     __ shl(as_FloatRegister($dst$$reg), __ T2D,
15321            as_FloatRegister($src$$reg),
15322            (int)$shift$$constant & 63);
15323   %}
15324   ins_pipe(pipe_class_default);
15325 %}
15326 
// Arithmetic (signed) right shift of two 64-bit lanes (2D) by a constant.
// NOTE(review): the count is passed as -(shift) & 63, consistent with the
// other *_imm right-shift rules in this file (e.g. the 4S USHR rule above
// uses -(shift) & 31) — this assumes the macro assembler's sshr() expects
// the amount in this negated/masked form; confirm against assembler_aarch64.
// NOTE(review): a shift constant of 0 (or any multiple of 64) masks to 0 —
// verify the matcher never presents that case to this rule.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  // Only for vectors of exactly two (long) elements.
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    // SSHR Vd.2D, Vn.2D, #imm — immediate arithmetic right shift.
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(pipe_class_default);
%}
15339 
// Logical (unsigned) right shift of two 64-bit lanes (2D) by a constant.
// NOTE(review): the count is passed as -(shift) & 63, matching the sibling
// vsra2L_imm rule and the 4S immediate right-shift rules — assumes ushr()
// expects the amount in this negated/masked form; confirm against
// assembler_aarch64.  A shift constant of 0 masks to 0 — verify the matcher
// never presents that case.
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  // Only for vectors of exactly two (long) elements.
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    // USHR Vd.2D, Vn.2D, #imm — immediate logical right shift.
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(pipe_class_default);
%}
15352 
15353 //----------PEEPHOLE RULES-----------------------------------------------------
15354 // These must follow all instruction definitions as they use the names
15355 // defined in the instructions definitions.
15356 //
15357 // peepmatch ( root_instr_name [preceding_instruction]* );
15358 //
15359 // peepconstraint %{
15360 // (instruction_number.operand_name relational_op instruction_number.operand_name
15361 //  [, ...] );
15362 // // instruction numbers are zero-based using left to right order in peepmatch
15363 //
15364 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
15365 // // provide an instruction_number.operand_name for each operand that appears
15366 // // in the replacement instruction's match rule
15367 //
15368 // ---------VM FLAGS---------------------------------------------------------
15369 //
15370 // All peephole optimizations can be turned off using -XX:-OptoPeephole
15371 //
15372 // Each peephole rule is given an identifying number starting with zero and
15373 // increasing by one in the order seen by the parser.  An individual peephole
15374 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
15375 // on the command-line.
15376 //
15377 // ---------CURRENT LIMITATIONS----------------------------------------------
15378 //
15379 // Only match adjacent instructions in same basic block
15380 // Only equality constraints
15381 // Only constraints between operands, not (0.dest_reg == RAX_enc)
15382 // Only one replacement instruction
15383 //
15384 // ---------EXAMPLE----------------------------------------------------------
15385 //
15386 // // pertinent parts of existing instructions in architecture description
15387 // instruct movI(iRegINoSp dst, iRegI src)
15388 // %{
15389 //   match(Set dst (CopyI src));
15390 // %}
15391 //
15392 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
15393 // %{
15394 //   match(Set dst (AddI dst src));
15395 //   effect(KILL cr);
15396 // %}
15397 //
15398 // // Change (inc mov) to lea
15399 // peephole %{
//   // increment preceded by register-register move
15401 //   peepmatch ( incI_iReg movI );
15402 //   // require that the destination register of the increment
15403 //   // match the destination register of the move
15404 //   peepconstraint ( 0.dst == 1.dst );
15405 //   // construct a replacement instruction that sets
15406 //   // the destination to ( move's source register + one )
15407 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
15408 // %}
15409 //
15410 
15411 // Implementation no longer uses movX instructions since
15412 // machine-independent system no longer uses CopyX nodes.
15413 //
15414 // peephole
15415 // %{
15416 //   peepmatch (incI_iReg movI);
15417 //   peepconstraint (0.dst == 1.dst);
15418 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
15419 // %}
15420 
15421 // peephole
15422 // %{
15423 //   peepmatch (decI_iReg movI);
15424 //   peepconstraint (0.dst == 1.dst);
15425 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
15426 // %}
15427 
15428 // peephole
15429 // %{
15430 //   peepmatch (addI_iReg_imm movI);
15431 //   peepconstraint (0.dst == 1.dst);
15432 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
15433 // %}
15434 
15435 // peephole
15436 // %{
15437 //   peepmatch (incL_iReg movL);
15438 //   peepconstraint (0.dst == 1.dst);
15439 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
15440 // %}
15441 
15442 // peephole
15443 // %{
15444 //   peepmatch (decL_iReg movL);
15445 //   peepconstraint (0.dst == 1.dst);
15446 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
15447 // %}
15448 
15449 // peephole
15450 // %{
15451 //   peepmatch (addL_iReg_imm movL);
15452 //   peepconstraint (0.dst == 1.dst);
15453 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
15454 // %}
15455 
15456 // peephole
15457 // %{
15458 //   peepmatch (addP_iReg_imm movP);
15459 //   peepconstraint (0.dst == 1.dst);
15460 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
15461 // %}
15462 
15463 // // Change load of spilled value to only a spill
15464 // instruct storeI(memory mem, iRegI src)
15465 // %{
15466 //   match(Set mem (StoreI mem src));
15467 // %}
15468 //
15469 // instruct loadI(iRegINoSp dst, memory mem)
15470 // %{
15471 //   match(Set dst (LoadI mem));
15472 // %}
15473 //
15474 
15475 //----------SMARTSPILL RULES---------------------------------------------------
15476 // These must follow all instruction definitions as they use the names
15477 // defined in the instructions definitions.
15478 
15479 // Local Variables:
15480 // mode: c++
15481 // End: