1 //
   2 // Copyright (c) 2013, Red Hat Inc.
   3 // Copyright (c) 2003, 2012, Oracle and/or its affiliates.
   4 // All rights reserved.
   5 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6 //
   7 // This code is free software; you can redistribute it and/or modify it
   8 // under the terms of the GNU General Public License version 2 only, as
   9 // published by the Free Software Foundation.
  10 //
  11 // This code is distributed in the hope that it will be useful, but WITHOUT
  12 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14 // version 2 for more details (a copy is included in the LICENSE file that
  15 // accompanied this code).
  16 //
  17 // You should have received a copy of the GNU General Public License version
  18 // 2 along with this work; if not, write to the Free Software Foundation,
  19 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20 //
  21 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22 // or visit www.oracle.com if you need additional information or have any
  23 // questions.
  24 //
  25 //
  26 
  27 // AArch64 Architecture Description File
  28 
  29 //----------REGISTER DEFINITION BLOCK------------------------------------------
// This information is used by the matcher and the register allocator to
// describe individual registers and classes of registers within the target
// architecture.
  33 
  34 register %{
  35 //----------Architecture Description Register Definitions----------------------
  36 // General Registers
  37 // "reg_def"  name ( register save type, C convention save type,
  38 //                   ideal register type, encoding );
  39 // Register Save Types:
  40 //
  41 // NS  = No-Save:       The register allocator assumes that these registers
  42 //                      can be used without saving upon entry to the method, &
  43 //                      that they do not need to be saved at call sites.
  44 //
  45 // SOC = Save-On-Call:  The register allocator assumes that these registers
  46 //                      can be used without saving upon entry to the method,
  47 //                      but that they must be saved at call sites.
  48 //
  49 // SOE = Save-On-Entry: The register allocator assumes that these registers
  50 //                      must be saved before using them upon entry to the
  51 //                      method, but they do not need to be saved at call
  52 //                      sites.
  53 //
  54 // AS  = Always-Save:   The register allocator assumes that these registers
  55 //                      must be saved before using them upon entry to the
  56 //                      method, & that they must be saved at call sites.
  57 //
  58 // Ideal Register Type is used to determine how to save & restore a
  59 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  60 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  61 //
  62 // The encoding number is the actual bit-pattern placed into the opcodes.
  63 
  64 // We must define the 64 bit int registers in two 32 bit halves, the
  65 // real lower register and a virtual upper half register. upper halves
  66 // are used by the register allocator but are not actually supplied as
  67 // operands to memory ops.
  68 //
  69 // follow the C1 compiler in making registers
  70 //
//   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
//   r8-r9 invisible to the allocator (so we can use them as scratch regs)
//
// As regards Java usage, we don't use any callee save registers
// because this makes it difficult to de-optimise a frame (see comment
// in x86 implementation of Deoptimization::unwind_callee_save_values)
  78 //
  79 
  80 // General Registers
  81 
// Integer registers r0-r31.  Each 64-bit register is declared as two
// 32-bit ADLC slots: Rn is the real low word, Rn_H the virtual high
// word used only for allocator book-keeping (never emitted as a memory
// operand).  Note that r8 and r9 are deliberately absent: they are kept
// invisible to the allocator for use as scratch registers.
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
// r19-r26 are SOC for Java code (first column) but SOE under the
// platform C calling convention (second column).
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// r27-r31 are reserved for fixed VM roles and never allocated for Java.
reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 142 
 143 // ----------------------------
 144 // Float/Double Registers
 145 // ----------------------------
 146 
 147 // Double Registers
 148 
 149 // The rules of ADL require that double registers be defined in pairs.
 150 // Each pair must be two 32-bit values, but not necessarily a pair of
 151 // single float registers. In each pair, ADLC-assigned register numbers
 152 // must be adjacent, with the lower number even. Finally, when the
 153 // CPU stores such a register pair to memory, the word associated with
 154 // the lower ADLC-assigned number must be stored to the lower address.
 155 
 156 // AArch64 has 32 floating-point registers. Each can store a vector of
 157 // single or double precision floating-point values up to 8 * 32
 158 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 159 // use the first float or double element of the vector.
 160 
// For Java use, float registers v0-v15 are always save-on-call, whereas
// the platform ABI treats v8-v15 as callee save. Float registers
// v16-v31 are SOC as per the platform spec.
 164 
  // SIMD/FP registers v0-v31.  Each 128-bit register is described as
  // four 32-bit ADLC slots: Vn (low word), Vn_H, Vn_J and Vn_K, so that
  // float (1 slot), double (2 slots) and 128-bit vector (4 slots)
  // values can all be mapped onto the same physical register.
  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  // v8-v15 are callee save under the platform ABI but SOC for Java
  // code (see the comment above this table).
  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 324 
 325 // ----------------------------
 326 // Special Registers
 327 // ----------------------------
 328 
// the AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. The FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
 334 
// Condition flags are modelled as a single pseudo-register with no
// backing VMReg (VMRegImpl::Bad()); encoding 32 places it after the
// 32 general-purpose registers.
reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 336 
 337 
 338 // Specify priority of register selection within phases of register
 339 // allocation.  Highest priority is first.  A useful heuristic is to
 340 // give registers a low priority when they are required by machine
 341 // instructions, like EAX and EDX on I486, and choose no-save registers
 342 // before save-on-call, & save-on-call before save-on-entry.  Registers
 343 // which participate in fixed calling sequences should come last.
 344 // Registers which are used as pairs must fall on an even boundary.
 345 
// Allocation order for the integer registers.  Plain volatiles come
// first so they are preferred; argument registers next (they
// participate in fixed calling sequences); callee-saved registers
// after that; the fixed-role registers (heapbase, thread, fp, lr, sp)
// come last and are not allocated at all.
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);
 386 
// Allocation order for the FP/SIMD registers.  The SOC registers
// v16-v31 are preferred, then the argument registers v0-v7, and
// finally v8-v15 (callee save under the platform ABI).
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
 427 
// The condition-flags pseudo-register gets its own allocation chunk.
alloc_class chunk2(RFLAGS);
 429 
 430 //----------Architecture Description Register Classes--------------------------
 431 // Several register classes are automatically defined based upon information in
 432 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 437 //
 438 
 439 // Class for all 32 bit integer registers -- excludes SP which will
 440 // never be used as an integer register
// All 32-bit integer registers r0-r30 (only the low halves; the sp,
// r31, is excluded because it can never be used as an integer register).
reg_class any_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30
);
 472 
// Singleton classes: each pins an operand to one specific 32-bit
// register, for rules that require that exact register.

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);
 484 
 485 // Class for all long integer registers (including RSP)
// All 64-bit (long/pointer) integer registers, including R31 (sp).
reg_class any_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 518 
 519 // Class for all non-special integer registers
// 32-bit integer registers available for general allocation: excludes
// the fixed-role registers r27-r31 (heapbase, thread, fp, lr, sp).
reg_class no_special_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);
 552 
 553 // Class for all non-special long integer registers
// 64-bit integer registers available for general allocation: excludes
// the fixed-role registers r27-r31 (heapbase, thread, fp, lr, sp).
reg_class no_special_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 586 
// Single-register 64-bit classes, used to pin an operand to one
// specific register (e.g. for calling conventions or fixed VM roles).

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register (r12 == rmethod)
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register (r27)
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register (r28)
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register (r29)
reg_class fp_reg(
    R29, R29_H
);

// Class for link register (r30)
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register (r31)
reg_class sp_reg(
  R31, R31_H
);
 656 
 657 // Class for all pointer registers
// All pointer registers, including the fixed-role registers and sp.
reg_class ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 690 
 691 // Class for all non_special pointer registers
// Pointer registers available for general allocation: excludes the
// fixed-role registers r27-r31 (heapbase, thread, fp, lr, sp).
reg_class no_special_ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 724 
 725 // Class for all float registers
// All single-precision float registers (one 32-bit slot per register).
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
 760 
 761 // Double precision float registers have virtual `high halves' that
 762 // are needed by the allocator.
 763 // Class for all double registers
// All double-precision float registers.  Each double occupies two
// 32-bit slots (Vn plus its virtual high half Vn_H), as required by
// the ADL pairing rules described above.
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 798 
 799 // Class for all 64bit vector registers
// All 64-bit vector registers (two 32-bit slots per register).
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 834 
 835 // Class for all 128bit vector registers
// All 128-bit vector registers (all four 32-bit slots per register).
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 870 
// Class for 128 bit register v0
// NOTE(review): the mask lists only the first two 32-bit slots (V0,
// V0_H) even though the physical register is 128 bits — presumably
// matching how these operands are used; confirm against the rules
// that reference these classes.
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);
 893 
 894 %}
 895 
 896 //----------DEFINITION BLOCK---------------------------------------------------
 897 // Define name --> value mappings to inform the ADLC of an integer valued name
 898 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 899 // Format:
 900 //        int_def  <name>         ( <int_value>, <expression>);
 901 // Generated Code in ad_<arch>.hpp
 902 //        #define  <name>   (<expression>)
 903 //        // value == <int_value>
 904 // Generated code in ad_<arch>.cpp adlc_verification()
 905 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 906 //
 907 
 908 // we follow the ppc-aix port in using a simple cost model which ranks
 909 // register operations as cheap, memory ops as more expensive and
 910 // branches as most expensive. the first two have a low as well as a
 911 // normal cost. huge cost appears to be a way of saying don't do
 912 // something
 913 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // branches and calls cost twice as much as a plain register op
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // volatile references are by far the most expensive operations here
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 921 
 922 
 923 //----------SOURCE BLOCK-------------------------------------------------------
 924 // This is a block of C++ code which provides values, functions, and
 925 // definitions necessary in the rest of the architecture description
 926 
 927 source_hpp %{
 928 
 929 #include "gc_implementation/shenandoah/brooksPointer.hpp"
 930 
 931 class CallStubImpl {
 932  
 933   //--------------------------------------------------------------
 934   //---<  Used for optimization in Compile::shorten_branches  >---
 935   //--------------------------------------------------------------
 936 
 937  public:
 938   // Size of call trampoline stub.
 939   static uint size_call_trampoline() {
 940     return 0; // no call trampolines on this platform
 941   }
 942   
 943   // number of relocations needed by a call trampoline stub
 944   static uint reloc_call_trampoline() { 
 945     return 0; // no call trampolines on this platform
 946   }
 947 };
 948 
class HandlerImpl {

 public:

  // emitters for the exception and deopt handler stubs; the
  // definitions appear elsewhere in this file
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // size of the exception handler: a single far branch
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  // size of the deopt handler
  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    return NativeInstruction::instruction_size + MacroAssembler::far_branch_size();
  }
};
 966 
  // graph traversal helpers used by the volatile load/store and CAS
  // predicates below; parent_membar, child_membar, leading_membar and
  // is_card_mark_membar are defined in the source block later in this
  // file

  MemBarNode *parent_membar(const Node *n);
  MemBarNode *child_membar(const MemBarNode *n);
  bool leading_membar(const MemBarNode *barrier);

  bool is_card_mark_membar(const MemBarNode *barrier);
  bool is_CAS(int opcode);

  // helpers for navigating between the membars of a volatile put or
  // CAS subgraph (see the extensive commentary in the source block
  // for the graph shapes these names refer to)
  MemBarNode *leading_to_normal(MemBarNode *leading);
  MemBarNode *normal_to_leading(const MemBarNode *barrier);
  MemBarNode *card_mark_to_trailing(const MemBarNode *barrier);
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing);
  MemBarNode *trailing_to_leading(const MemBarNode *trailing);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);
 998 %}
 999 
1000 source %{
1001 
  // Optimization of volatile gets and puts
1003   // -------------------------------------
1004   //
1005   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1006   // use to implement volatile reads and writes. For a volatile read
1007   // we simply need
1008   //
1009   //   ldar<x>
1010   //
1011   // and for a volatile write we need
1012   //
1013   //   stlr<x>
1014   // 
1015   // Alternatively, we can implement them by pairing a normal
1016   // load/store with a memory barrier. For a volatile read we need
1017   // 
1018   //   ldr<x>
1019   //   dmb ishld
1020   //
1021   // for a volatile write
1022   //
1023   //   dmb ish
1024   //   str<x>
1025   //   dmb ish
1026   //
1027   // We can also use ldaxr and stlxr to implement compare and swap CAS
1028   // sequences. These are normally translated to an instruction
1029   // sequence like the following
1030   //
1031   //   dmb      ish
1032   // retry:
1033   //   ldxr<x>   rval raddr
1034   //   cmp       rval rold
1035   //   b.ne done
  //   stlxr<x>  rval, rnew, [raddr]
1037   //   cbnz      rval retry
1038   // done:
1039   //   cset      r0, eq
1040   //   dmb ishld
1041   //
1042   // Note that the exclusive store is already using an stlxr
1043   // instruction. That is required to ensure visibility to other
1044   // threads of the exclusive write (assuming it succeeds) before that
1045   // of any subsequent writes.
1046   //
1047   // The following instruction sequence is an improvement on the above
1048   //
1049   // retry:
1050   //   ldaxr<x>  rval raddr
1051   //   cmp       rval rold
1052   //   b.ne done
  //   stlxr<x>  rval, rnew, [raddr]
1054   //   cbnz      rval retry
1055   // done:
1056   //   cset      r0, eq
1057   //
1058   // We don't need the leading dmb ish since the stlxr guarantees
1059   // visibility of prior writes in the case that the swap is
1060   // successful. Crucially we don't have to worry about the case where
1061   // the swap is not successful since no valid program should be
1062   // relying on visibility of prior changes by the attempting thread
1063   // in the case where the CAS fails.
1064   //
1065   // Similarly, we don't need the trailing dmb ishld if we substitute
1066   // an ldaxr instruction since that will provide all the guarantees we
1067   // require regarding observation of changes made by other threads
1068   // before any change to the CAS address observed by the load.
1069   //
1070   // In order to generate the desired instruction sequence we need to
1071   // be able to identify specific 'signature' ideal graph node
1072   // sequences which i) occur as a translation of a volatile reads or
1073   // writes or CAS operations and ii) do not occur through any other
1074   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1076   // sequences to the desired machine code sequences. Selection of the
1077   // alternative rules can be implemented by predicates which identify
1078   // the relevant node sequences.
1079   //
1080   // The ideal graph generator translates a volatile read to the node
1081   // sequence
1082   //
1083   //   LoadX[mo_acquire]
1084   //   MemBarAcquire
1085   //
1086   // As a special case when using the compressed oops optimization we
1087   // may also see this variant
1088   //
1089   //   LoadN[mo_acquire]
1090   //   DecodeN
1091   //   MemBarAcquire
1092   //
1093   // A volatile write is translated to the node sequence
1094   //
1095   //   MemBarRelease
1096   //   StoreX[mo_release] {CardMark}-optional
1097   //   MemBarVolatile
1098   //
1099   // n.b. the above node patterns are generated with a strict
1100   // 'signature' configuration of input and output dependencies (see
1101   // the predicates below for exact details). The card mark may be as
1102   // simple as a few extra nodes or, in a few GC configurations, may
1103   // include more complex control flow between the leading and
1104   // trailing memory barriers. However, whatever the card mark
1105   // configuration these signatures are unique to translated volatile
1106   // reads/stores -- they will not appear as a result of any other
1107   // bytecode translation or inlining nor as a consequence of
1108   // optimizing transforms.
1109   //
1110   // We also want to catch inlined unsafe volatile gets and puts and
1111   // be able to implement them using either ldar<x>/stlr<x> or some
1112   // combination of ldr<x>/stlr<x> and dmb instructions.
1113   //
1114   // Inlined unsafe volatiles puts manifest as a minor variant of the
1115   // normal volatile put node sequence containing an extra cpuorder
1116   // membar
1117   //
1118   //   MemBarRelease
1119   //   MemBarCPUOrder
1120   //   StoreX[mo_release] {CardMark}-optional
1121   //   MemBarVolatile
1122   //
1123   // n.b. as an aside, the cpuorder membar is not itself subject to
1124   // matching and translation by adlc rules.  However, the rule
1125   // predicates need to detect its presence in order to correctly
1126   // select the desired adlc rules.
1127   //
1128   // Inlined unsafe volatile gets manifest as a somewhat different
1129   // node sequence to a normal volatile get
1130   //
1131   //   MemBarCPUOrder
1132   //        ||       \\
1133   //   MemBarAcquire LoadX[mo_acquire]
1134   //        ||
1135   //   MemBarCPUOrder
1136   //
1137   // In this case the acquire membar does not directly depend on the
1138   // load. However, we can be sure that the load is generated from an
1139   // inlined unsafe volatile get if we see it dependent on this unique
1140   // sequence of membar nodes. Similarly, given an acquire membar we
1141   // can know that it was added because of an inlined unsafe volatile
1142   // get if it is fed and feeds a cpuorder membar and if its feed
1143   // membar also feeds an acquiring load.
1144   //
1145   // Finally an inlined (Unsafe) CAS operation is translated to the
1146   // following ideal graph
1147   //
1148   //   MemBarRelease
1149   //   MemBarCPUOrder
1150   //   CompareAndSwapX {CardMark}-optional
1151   //   MemBarCPUOrder
1152   //   MemBarAcquire
1153   //
1154   // So, where we can identify these volatile read and write
1155   // signatures we can choose to plant either of the above two code
1156   // sequences. For a volatile read we can simply plant a normal
1157   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1158   // also choose to inhibit translation of the MemBarAcquire and
1159   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1160   //
1161   // When we recognise a volatile store signature we can choose to
1162   // plant at a dmb ish as a translation for the MemBarRelease, a
1163   // normal str<x> and then a dmb ish for the MemBarVolatile.
1164   // Alternatively, we can inhibit translation of the MemBarRelease
1165   // and MemBarVolatile and instead plant a simple stlr<x>
1166   // instruction.
1167   //
1168   // when we recognise a CAS signature we can choose to plant a dmb
1169   // ish as a translation for the MemBarRelease, the conventional
1170   // macro-instruction sequence for the CompareAndSwap node (which
1171   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1172   // Alternatively, we can elide generation of the dmb instructions
1173   // and plant the alternative CompareAndSwap macro-instruction
1174   // sequence (which uses ldaxr<x>).
1175   // 
1176   // Of course, the above only applies when we see these signature
1177   // configurations. We still want to plant dmb instructions in any
1178   // other cases where we may see a MemBarAcquire, MemBarRelease or
1179   // MemBarVolatile. For example, at the end of a constructor which
1180   // writes final/volatile fields we will see a MemBarRelease
1181   // instruction and this needs a 'dmb ish' lest we risk the
1182   // constructed object being visible without making the
1183   // final/volatile field writes visible.
1184   //
1185   // n.b. the translation rules below which rely on detection of the
1186   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1187   // If we see anything other than the signature configurations we
1188   // always just translate the loads and stores to ldr<x> and str<x>
1189   // and translate acquire, release and volatile membars to the
1190   // relevant dmb instructions.
1191   //
1192 
1193   // graph traversal helpers used for volatile put/get and CAS
1194   // optimization
1195 
1196   // 1) general purpose helpers
1197 
1198   // if node n is linked to a parent MemBarNode by an intervening
1199   // Control and Memory ProjNode return the MemBarNode otherwise return
1200   // NULL.
1201   //
1202   // n may only be a Load or a MemBar.
1203 
1204   MemBarNode *parent_membar(const Node *n)
1205   {
1206     Node *ctl = NULL;
1207     Node *mem = NULL;
1208     Node *membar = NULL;
1209 
1210     if (n->is_Load()) {
1211       ctl = n->lookup(LoadNode::Control);
1212       mem = n->lookup(LoadNode::Memory);
1213     } else if (n->is_MemBar()) {
1214       ctl = n->lookup(TypeFunc::Control);
1215       mem = n->lookup(TypeFunc::Memory);
1216     } else {
1217         return NULL;
1218     }
1219 
1220     if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj()) {
1221       return NULL;
1222     }
1223 
1224     membar = ctl->lookup(0);
1225 
1226     if (!membar || !membar->is_MemBar()) {
1227       return NULL;
1228     }
1229 
1230     if (mem->lookup(0) != membar) {
1231       return NULL;
1232     }
1233 
1234     return membar->as_MemBar();
1235   }
1236 
1237   // if n is linked to a child MemBarNode by intervening Control and
1238   // Memory ProjNodes return the MemBarNode otherwise return NULL.
1239 
1240   MemBarNode *child_membar(const MemBarNode *n)
1241   {
1242     ProjNode *ctl = n->proj_out(TypeFunc::Control);
1243     ProjNode *mem = n->proj_out(TypeFunc::Memory);
1244 
1245     // MemBar needs to have both a Ctl and Mem projection
1246     if (! ctl || ! mem)
1247       return NULL;
1248 
1249     MemBarNode *child = NULL;
1250     Node *x;
1251 
1252     for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1253       x = ctl->fast_out(i);
1254       // if we see a membar we keep hold of it. we may also see a new
1255       // arena copy of the original but it will appear later
1256       if (x->is_MemBar()) {
1257           child = x->as_MemBar();
1258           break;
1259       }
1260     }
1261 
1262     if (child == NULL) {
1263       return NULL;
1264     }
1265 
1266     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1267       x = mem->fast_out(i);
1268       // if we see a membar we keep hold of it. we may also see a new
1269       // arena copy of the original but it will appear later
1270       if (x == child) {
1271         return child;
1272       }
1273     }
1274     return NULL;
1275   }
1276 
1277   // helper predicate use to filter candidates for a leading memory
1278   // barrier
1279   //
1280   // returns true if barrier is a MemBarRelease or a MemBarCPUOrder
1281   // whose Ctl and Mem feeds come from a MemBarRelease otherwise false
1282 
1283   bool leading_membar(const MemBarNode *barrier)
1284   {
1285     int opcode = barrier->Opcode();
1286     // if this is a release membar we are ok
1287     if (opcode == Op_MemBarRelease) {
1288       return true;
1289     }
1290     // if its a cpuorder membar . . .
1291     if (opcode != Op_MemBarCPUOrder) {
1292       return false;
1293     }
1294     // then the parent has to be a release membar
1295     MemBarNode *parent = parent_membar(barrier);
1296     if (!parent) {
1297       return false;
1298     }
1299     opcode = parent->Opcode();
1300     return opcode == Op_MemBarRelease;
1301   }
1302  
1303   // 2) card mark detection helper
1304 
1305   // helper predicate which can be used to detect a volatile membar
1306   // introduced as part of a conditional card mark sequence either by
1307   // G1 or by CMS when UseCondCardMark is true.
1308   //
1309   // membar can be definitively determined to be part of a card mark
1310   // sequence if and only if all the following hold
1311   //
1312   // i) it is a MemBarVolatile
1313   //
1314   // ii) either UseG1GC or (UseConcMarkSweepGC && UseCondCardMark) is
1315   // true
1316   //
1317   // iii) the node's Mem projection feeds a StoreCM node.
1318   
1319   bool is_card_mark_membar(const MemBarNode *barrier)
1320   {
1321     if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark)) {
1322       return false;
1323     }
1324 
1325     if (barrier->Opcode() != Op_MemBarVolatile) {
1326       return false;
1327     }
1328 
1329     ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
1330 
1331     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax ; i++) {
1332       Node *y = mem->fast_out(i);
1333       if (y->Opcode() == Op_StoreCM) {
1334         return true;
1335       }
1336     }
1337   
1338     return false;
1339   }
1340 
1341 
1342   // 3) helper predicates to traverse volatile put or CAS graphs which
1343   // may contain GC barrier subgraphs
1344 
1345   // Preamble
1346   // --------
1347   //
1348   // for volatile writes we can omit generating barriers and employ a
1349   // releasing store when we see a node sequence sequence with a
1350   // leading MemBarRelease and a trailing MemBarVolatile as follows
1351   //
1352   //   MemBarRelease
1353   //  {      ||      } -- optional
1354   //  {MemBarCPUOrder}
1355   //         ||     \\
1356   //         ||     StoreX[mo_release]
1357   //         | \     /
1358   //         | MergeMem
1359   //         | /
1360   //   MemBarVolatile
1361   //
1362   // where
1363   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1364   //  | \ and / indicate further routing of the Ctl and Mem feeds
1365   // 
1366   // this is the graph we see for non-object stores. however, for a
1367   // volatile Object store (StoreN/P) we may see other nodes below the
1368   // leading membar because of the need for a GC pre- or post-write
1369   // barrier.
1370   //
  // with most GC configurations we will see this simple variant which
1372   // includes a post-write barrier card mark.
1373   //
1374   //   MemBarRelease______________________________
1375   //         ||    \\               Ctl \        \\
1376   //         ||    StoreN/P[mo_release] CastP2X  StoreB/CM
1377   //         | \     /                       . . .  /
1378   //         | MergeMem
1379   //         | /
1380   //         ||      /
1381   //   MemBarVolatile
1382   //
1383   // i.e. the leading membar feeds Ctl to a CastP2X (which converts
1384   // the object address to an int used to compute the card offset) and
1385   // Ctl+Mem to a StoreB node (which does the actual card mark).
1386   //
1387   // n.b. a StoreCM node will only appear in this configuration when
1388   // using CMS. StoreCM differs from a normal card mark write (StoreB)
1389   // because it implies a requirement to order visibility of the card
1390   // mark (StoreCM) relative to the object put (StoreP/N) using a
1391   // StoreStore memory barrier (arguably this ought to be represented
1392   // explicitly in the ideal graph but that is not how it works). This
1393   // ordering is required for both non-volatile and volatile
1394   // puts. Normally that means we need to translate a StoreCM using
1395   // the sequence
1396   //
1397   //   dmb ishst
1398   //   stlrb
1399   //
1400   // However, in the case of a volatile put if we can recognise this
1401   // configuration and plant an stlr for the object write then we can
1402   // omit the dmb and just plant an strb since visibility of the stlr
1403   // is ordered before visibility of subsequent stores. StoreCM nodes
1404   // also arise when using G1 or using CMS with conditional card
1405   // marking. In these cases (as we shall see) we don't need to insert
1406   // the dmb when translating StoreCM because there is already an
1407   // intervening StoreLoad barrier between it and the StoreP/N.
1408   //
1409   // It is also possible to perform the card mark conditionally on it
1410   // currently being unmarked in which case the volatile put graph
1411   // will look slightly different
1412   //
1413   //   MemBarRelease____________________________________________
1414   //         ||    \\               Ctl \     Ctl \     \\  Mem \
1415   //         ||    StoreN/P[mo_release] CastP2X   If   LoadB     |
1416   //         | \     /                              \            |
1417   //         | MergeMem                            . . .      StoreB
1418   //         | /                                                /
1419   //         ||     /
1420   //   MemBarVolatile
1421   //
1422   // It is worth noting at this stage that both the above
1423   // configurations can be uniquely identified by checking that the
1424   // memory flow includes the following subgraph:
1425   //
1426   //   MemBarRelease
1427   //  {MemBarCPUOrder}
1428   //          |  \      . . .
1429   //          |  StoreX[mo_release]  . . .
1430   //          |   /
1431   //         MergeMem
1432   //          |
1433   //   MemBarVolatile
1434   //
1435   // This is referred to as a *normal* subgraph. It can easily be
1436   // detected starting from any candidate MemBarRelease,
1437   // StoreX[mo_release] or MemBarVolatile.
1438   //
1439   // A simple variation on this normal case occurs for an unsafe CAS
1440   // operation. The basic graph for a non-object CAS is
1441   //
1442   //   MemBarRelease
1443   //         ||
1444   //   MemBarCPUOrder
1445   //         ||     \\   . . .
1446   //         ||     CompareAndSwapX
1447   //         ||       |
1448   //         ||     SCMemProj
1449   //         | \     /
1450   //         | MergeMem
1451   //         | /
1452   //   MemBarCPUOrder
1453   //         ||
1454   //   MemBarAcquire
1455   //
1456   // The same basic variations on this arrangement (mutatis mutandis)
  // occur when a card mark is introduced. i.e. we see the same basic
  // shape but the StoreP/N is replaced with CompareAndSwapP/N and the
1459   // tail of the graph is a pair comprising a MemBarCPUOrder +
1460   // MemBarAcquire.
1461   //
1462   // So, in the case of a CAS the normal graph has the variant form
1463   //
1464   //   MemBarRelease
1465   //   MemBarCPUOrder
1466   //          |   \      . . .
1467   //          |  CompareAndSwapX  . . .
1468   //          |    |
1469   //          |   SCMemProj
1470   //          |   /  . . .
1471   //         MergeMem
1472   //          |
1473   //   MemBarCPUOrder
1474   //   MemBarAcquire
1475   //
1476   // This graph can also easily be detected starting from any
1477   // candidate MemBarRelease, CompareAndSwapX or MemBarAcquire.
1478   //
1479   // the code below uses two helper predicates, leading_to_normal and
1480   // normal_to_leading to identify these normal graphs, one validating
1481   // the layout starting from the top membar and searching down and
1482   // the other validating the layout starting from the lower membar
1483   // and searching up.
1484   //
1485   // There are two special case GC configurations when a normal graph
1486   // may not be generated: when using G1 (which always employs a
1487   // conditional card mark); and when using CMS with conditional card
1488   // marking configured. These GCs are both concurrent rather than
1489   // stop-the world GCs. So they introduce extra Ctl+Mem flow into the
1490   // graph between the leading and trailing membar nodes, in
  // particular enforcing stronger memory serialisation between the
1492   // object put and the corresponding conditional card mark. CMS
1493   // employs a post-write GC barrier while G1 employs both a pre- and
1494   // post-write GC barrier. Of course the extra nodes may be absent --
1495   // they are only inserted for object puts. This significantly
1496   // complicates the task of identifying whether a MemBarRelease,
1497   // StoreX[mo_release] or MemBarVolatile forms part of a volatile put
1498   // when using these GC configurations (see below). It adds similar
1499   // complexity to the task of identifying whether a MemBarRelease,
1500   // CompareAndSwapX or MemBarAcquire forms part of a CAS.
1501   //
1502   // In both cases the post-write subtree includes an auxiliary
1503   // MemBarVolatile (StoreLoad barrier) separating the object put and
1504   // the read of the corresponding card. This poses two additional
1505   // problems.
1506   //
1507   // Firstly, a card mark MemBarVolatile needs to be distinguished
1508   // from a normal trailing MemBarVolatile. Resolving this first
1509   // problem is straightforward: a card mark MemBarVolatile always
1510   // projects a Mem feed to a StoreCM node and that is a unique marker
1511   //
1512   //      MemBarVolatile (card mark)
1513   //       C |    \     . . .
1514   //         |   StoreCM   . . .
1515   //       . . .
1516   //
1517   // The second problem is how the code generator is to translate the
1518   // card mark barrier? It always needs to be translated to a "dmb
1519   // ish" instruction whether or not it occurs as part of a volatile
1520   // put. A StoreLoad barrier is needed after the object put to ensure
1521   // i) visibility to GC threads of the object put and ii) visibility
1522   // to the mutator thread of any card clearing write by a GC
1523   // thread. Clearly a normal store (str) will not guarantee this
1524   // ordering but neither will a releasing store (stlr). The latter
1525   // guarantees that the object put is visible but does not guarantee
1526   // that writes by other threads have also been observed.
1527   // 
1528   // So, returning to the task of translating the object put and the
1529   // leading/trailing membar nodes: what do the non-normal node graph
1530   // look like for these 2 special cases? and how can we determine the
1531   // status of a MemBarRelease, StoreX[mo_release] or MemBarVolatile
1532   // in both normal and non-normal cases?
1533   //
1534   // A CMS GC post-barrier wraps its card write (StoreCM) inside an If
  // which selects conditional execution based on the value loaded
1536   // (LoadB) from the card. Ctl and Mem are fed to the If via an
1537   // intervening StoreLoad barrier (MemBarVolatile).
1538   //
1539   // So, with CMS we may see a node graph for a volatile object store
1540   // which looks like this
1541   //
1542   //   MemBarRelease
1543   //   MemBarCPUOrder_(leading)__________________
1544   //     C |    M \       \\                   C \
1545   //       |       \    StoreN/P[mo_release]  CastP2X
1546   //       |    Bot \    /
1547   //       |       MergeMem
1548   //       |         /
1549   //      MemBarVolatile (card mark)
1550   //     C |  ||    M |
1551   //       | LoadB    |
1552   //       |   |      |
1553   //       | Cmp      |\
1554   //       | /        | \
1555   //       If         |  \
1556   //       | \        |   \
1557   // IfFalse  IfTrue  |    \
1558   //       \     / \  |     \
1559   //        \   / StoreCM    |
1560   //         \ /      |      |
1561   //        Region   . . .   |
1562   //          | \           /
1563   //          |  . . .  \  / Bot
1564   //          |       MergeMem
1565   //          |          |
1566   //        MemBarVolatile (trailing)
1567   //
1568   // The first MergeMem merges the AliasIdxBot Mem slice from the
1569   // leading membar and the oopptr Mem slice from the Store into the
1570   // card mark membar. The trailing MergeMem merges the AliasIdxBot
1571   // Mem slice from the card mark membar and the AliasIdxRaw slice
1572   // from the StoreCM into the trailing membar (n.b. the latter
1573   // proceeds via a Phi associated with the If region).
1574   //
1575   // The graph for a CAS varies slightly, the obvious difference being
1576   // that the StoreN/P node is replaced by a CompareAndSwapP/N node
1577   // and the trailing MemBarVolatile by a MemBarCPUOrder +
1578   // MemBarAcquire pair. The other important difference is that the
1579   // CompareAndSwap node's SCMemProj is not merged into the card mark
1580   // membar - it still feeds the trailing MergeMem. This also means
1581   // that the card mark membar receives its Mem feed directly from the
1582   // leading membar rather than via a MergeMem.
1583   //
1584   //   MemBarRelease
1585   //   MemBarCPUOrder__(leading)_________________________
1586   //       ||                       \\                 C \
1587   //   MemBarVolatile (card mark)  CompareAndSwapN/P  CastP2X
1588   //     C |  ||    M |              |
1589   //       | LoadB    |       ______/|
1590   //       |   |      |      /       |
1591   //       | Cmp      |     /      SCMemProj
1592   //       | /        |    /         |
1593   //       If         |   /         /
1594   //       | \        |  /         /
1595   // IfFalse  IfTrue  | /         /
1596   //       \     / \  |/ prec    /
1597   //        \   / StoreCM       /
1598   //         \ /      |        /
1599   //        Region   . . .    /
1600   //          | \            /
1601   //          |  . . .  \   / Bot
1602   //          |       MergeMem
1603   //          |          |
1604   //        MemBarCPUOrder
1605   //        MemBarAcquire (trailing)
1606   //
1607   // This has a slightly different memory subgraph to the one seen
1608   // previously but the core of it is the same as for the CAS normal
  // subgraph
1610   //
1611   //   MemBarRelease
1612   //   MemBarCPUOrder____
1613   //      ||             \      . . .
1614   //   MemBarVolatile  CompareAndSwapX  . . .
1615   //      |  \            |
1616   //        . . .   SCMemProj
1617   //          |     /  . . .
1618   //         MergeMem
1619   //          |
1620   //   MemBarCPUOrder
1621   //   MemBarAcquire
1622   //
1623   //
1624   // G1 is quite a lot more complicated. The nodes inserted on behalf
1625   // of G1 may comprise: a pre-write graph which adds the old value to
1626   // the SATB queue; the releasing store itself; and, finally, a
1627   // post-write graph which performs a card mark.
1628   //
1629   // The pre-write graph may be omitted, but only when the put is
1630   // writing to a newly allocated (young gen) object and then only if
1631   // there is a direct memory chain to the Initialize node for the
1632   // object allocation. This will not happen for a volatile put since
1633   // any memory chain passes through the leading membar.
1634   //
1635   // The pre-write graph includes a series of 3 If tests. The outermost
1636   // If tests whether SATB is enabled (no else case). The next If tests
1637   // whether the old value is non-NULL (no else case). The third tests
1638   // whether the SATB queue index is > 0, if so updating the queue. The
1639   // else case for this third If calls out to the runtime to allocate a
1640   // new queue buffer.
1641   //
1642   // So with G1 the pre-write and releasing store subgraph looks like
1643   // this (the nested Ifs are omitted).
1644   //
1645   //  MemBarRelease (leading)____________
1646   //     C |  ||  M \   M \    M \  M \ . . .
1647   //       | LoadB   \  LoadL  LoadN   \
1648   //       | /        \                 \
1649   //       If         |\                 \
1650   //       | \        | \                 \
1651   //  IfFalse  IfTrue |  \                 \
1652   //       |     |    |   \                 |
1653   //       |     If   |   /\                |
1654   //       |     |          \               |
1655   //       |                 \              |
1656   //       |    . . .         \             |
1657   //       | /       | /       |            |
1658   //      Region  Phi[M]       |            |
1659   //       | \       |         |            |
1660   //       |  \_____ | ___     |            |
1661   //     C | C \     |   C \ M |            |
1662   //       | CastP2X | StoreN/P[mo_release] |
1663   //       |         |         |            |
1664   //     C |       M |       M |          M |
1665   //        \        |         |           /
1666   //                  . . . 
1667   //          (post write subtree elided)
1668   //                    . . .
1669   //             C \         M /
1670   //         MemBarVolatile (trailing)
1671   //
1672   // n.b. the LoadB in this subgraph is not the card read -- it's a
1673   // read of the SATB queue active flag.
1674   //
1675   // Once again the CAS graph is a minor variant on the above with the
  // expected substitutions of CompareAndSwapX for StoreN/P and
1677   // MemBarCPUOrder + MemBarAcquire for trailing MemBarVolatile.
1678   //
1679   // The G1 post-write subtree is also optional, this time when the
1680   // new value being written is either null or can be identified as a
1681   // newly allocated (young gen) object with no intervening control
1682   // flow. The latter cannot happen but the former may, in which case
  // the card mark membar is omitted and the memory feeds from the
  // leading membar and the StoreN/P are merged direct into the
1685   // trailing membar as per the normal subgraph. So, the only special
1686   // case which arises is when the post-write subgraph is generated.
1687   //
1688   // The kernel of the post-write G1 subgraph is the card mark itself
1689   // which includes a card mark memory barrier (MemBarVolatile), a
1690   // card test (LoadB), and a conditional update (If feeding a
1691   // StoreCM). These nodes are surrounded by a series of nested Ifs
1692   // which try to avoid doing the card mark. The top level If skips if
1693   // the object reference does not cross regions (i.e. it tests if
1694   // (adr ^ val) >> log2(regsize) != 0) -- intra-region references
1695   // need not be recorded. The next If, which skips on a NULL value,
1696   // may be absent (it is not generated if the type of value is >=
1697   // OopPtr::NotNull). The 3rd If skips writes to young regions (by
1698   // checking if card_val != young).  n.b. although this test requires
1699   // a pre-read of the card it can safely be done before the StoreLoad
1700   // barrier. However that does not bypass the need to reread the card
1701   // after the barrier.
1702   //
1703   //                (pre-write subtree elided)
1704   //        . . .                  . . .    . . .  . . .
1705   //        C |                    M |     M |    M |
1706   //       Region                  Phi[M] StoreN    |
1707   //          |                     / \      |      |
1708   //         / \_______            /   \     |      |
1709   //      C / C \      . . .            \    |      |
1710   //       If   CastP2X . . .            |   |      |
1711   //       / \                           |   |      |
1712   //      /   \                          |   |      |
1713   // IfFalse IfTrue                      |   |      |
1714   //   |       |                         |   |     /|
1715   //   |       If                        |   |    / |
1716   //   |      / \                        |   |   /  |
1717   //   |     /   \                        \  |  /   |
1718   //   | IfFalse IfTrue                   MergeMem  |
1719   //   |  . . .    / \                       /      |
1720   //   |          /   \                     /       |
1721   //   |     IfFalse IfTrue                /        |
1722   //   |      . . .    |                  /         |
1723   //   |               If                /          |
1724   //   |               / \              /           |
1725   //   |              /   \            /            |
1726   //   |         IfFalse IfTrue       /             |
1727   //   |           . . .   |         /              |
1728   //   |                    \       /               |
1729   //   |                     \     /                |
1730   //   |             MemBarVolatile__(card mark)    |
1731   //   |                ||   C |  M \  M \          |
1732   //   |               LoadB   If    |    |         |
1733   //   |                      / \    |    |         |
1734   //   |                     . . .   |    |         |
1735   //   |                          \  |    |        /
1736   //   |                        StoreCM   |       /
1737   //   |                          . . .   |      /
1738   //   |                        _________/      /
1739   //   |                       /  _____________/
1740   //   |   . . .       . . .  |  /            /
1741   //   |    |                 | /   _________/
1742   //   |    |               Phi[M] /        /
1743   //   |    |                 |   /        /
1744   //   |    |                 |  /        /
1745   //   |  Region  . . .     Phi[M]  _____/
1746   //   |    /                 |    /
1747   //   |                      |   /   
1748   //   | . . .   . . .        |  /
1749   //   | /                    | /
1750   // Region           |  |  Phi[M]
1751   //   |              |  |  / Bot
1752   //    \            MergeMem 
1753   //     \            /
1754   //     MemBarVolatile
1755   //
1756   // As with CMS the initial MergeMem merges the AliasIdxBot Mem slice
1757   // from the leading membar and the oopptr Mem slice from the Store
1758   // into the card mark membar i.e. the memory flow to the card mark
1759   // membar still looks like a normal graph.
1760   //
1761   // The trailing MergeMem merges an AliasIdxBot Mem slice with other
1762   // Mem slices (from the StoreCM and other card mark queue stores).
1763   // However in this case the AliasIdxBot Mem slice does not come
1764   // direct from the card mark membar. It is merged through a series
1765   // of Phi nodes. These are needed to merge the AliasIdxBot Mem flow
1766   // from the leading membar with the Mem feed from the card mark
1767   // membar. Each Phi corresponds to one of the Ifs which may skip
1768   // around the card mark membar. So when the If implementing the NULL
1769   // value check has been elided the total number of Phis is 2
1770   // otherwise it is 3.
1771   //
1772   // The CAS graph when using G1GC also includes a pre-write subgraph
  // and an optional post-write subgraph. The same variations are
  // introduced as for CMS with conditional card marking i.e. the
  // StoreP/N is swapped for a CompareAndSwapP/N, the trailing
1776   // MemBarVolatile for a MemBarCPUOrder + MemBarAcquire pair and the
1777   // Mem feed from the CompareAndSwapP/N includes a precedence
1778   // dependency feed to the StoreCM and a feed via an SCMemProj to the
1779   // trailing membar. So, as before the configuration includes the
1780   // normal CAS graph as a subgraph of the memory flow.
1781   //
1782   // So, the upshot is that in all cases the volatile put graph will
  // include a *normal* memory subgraph between the leading membar and
1784   // its child membar, either a volatile put graph (including a
1785   // releasing StoreX) or a CAS graph (including a CompareAndSwapX).
1786   // When that child is not a card mark membar then it marks the end
1787   // of the volatile put or CAS subgraph. If the child is a card mark
1788   // membar then the normal subgraph will form part of a volatile put
1789   // subgraph if and only if the child feeds an AliasIdxBot Mem feed
1790   // to a trailing barrier via a MergeMem. That feed is either direct
1791   // (for CMS) or via 2 or 3 Phi nodes merging the leading barrier
1792   // memory flow (for G1).
1793   // 
1794   // The predicates controlling generation of instructions for store
1795   // and barrier nodes employ a few simple helper functions (described
1796   // below) which identify the presence or absence of all these
1797   // subgraph configurations and provide a means of traversing from
1798   // one node in the subgraph to another.
1799 
1800   // is_CAS(int opcode)
1801   //
1802   // return true if opcode is one of the possible CompareAndSwapX
1803   // values otherwise false.
1804 
1805   bool is_CAS(int opcode)
1806   {
1807     return (opcode == Op_CompareAndSwapI ||
1808             opcode == Op_CompareAndSwapL ||
1809             opcode == Op_CompareAndSwapN ||
1810             opcode == Op_CompareAndSwapP);
1811   }
1812 
1813   // leading_to_normal
1814   //
  // graph traversal helper which detects the normal case Mem feed from
1816   // a release membar (or, optionally, its cpuorder child) to a
1817   // dependent volatile membar i.e. it ensures that one or other of
1818   // the following Mem flow subgraph is present.
1819   //
1820   //   MemBarRelease
1821   //   MemBarCPUOrder {leading}
1822   //          |  \      . . .
1823   //          |  StoreN/P[mo_release]  . . .
1824   //          |   /
1825   //         MergeMem
1826   //          |
1827   //   MemBarVolatile {trailing or card mark}
1828   //
1829   //   MemBarRelease
1830   //   MemBarCPUOrder {leading}
1831   //      |       \      . . .
1832   //      |     CompareAndSwapX  . . .
1833   //               |
1834   //     . . .    SCMemProj
1835   //           \   |
1836   //      |    MergeMem
1837   //      |       /
1838   //    MemBarCPUOrder
1839   //    MemBarAcquire {trailing}
1840   //
1841   // if the correct configuration is present returns the trailing
1842   // membar otherwise NULL.
1843   //
1844   // the input membar is expected to be either a cpuorder membar or a
1845   // release membar. in the latter case it should not have a cpu membar
1846   // child.
1847   //
1848   // the returned value may be a card mark or trailing membar
1849   //
1850 
1851   MemBarNode *leading_to_normal(MemBarNode *leading)
1852   {
1853     assert((leading->Opcode() == Op_MemBarRelease ||
1854             leading->Opcode() == Op_MemBarCPUOrder),
1855            "expecting a volatile or cpuroder membar!");
1856 
1857     // check the mem flow
1858     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
1859 
1860     if (!mem) {
1861       return NULL;
1862     }
1863 
1864     Node *x = NULL;
1865     StoreNode * st = NULL;
1866     LoadStoreNode *cas = NULL;
1867     MergeMemNode *mm = NULL;
1868 
1869     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1870       x = mem->fast_out(i);
1871       if (x->is_MergeMem()) {
1872         if (mm != NULL) {
1873           return NULL;
1874         }
1875         // two merge mems is one too many
1876         mm = x->as_MergeMem();
1877       } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
1878         // two releasing stores/CAS nodes is one too many
1879         if (st != NULL || cas != NULL) {
1880           return NULL;
1881         }
1882         st = x->as_Store();
1883       } else if (is_CAS(x->Opcode())) {
1884         if (st != NULL || cas != NULL) {
1885           return NULL;
1886         }
1887         cas = x->as_LoadStore();
1888       }
1889     }
1890 
1891     // must have a store or a cas
1892     if (!st && !cas) {
1893       return NULL;
1894     }
1895 
1896     // must have a merge if we also have st
1897     if (st && !mm) {
1898       return NULL;
1899     }
1900 
1901     Node *y = NULL;
1902     if (cas) {
1903       // look for an SCMemProj
1904       for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
1905         x = cas->fast_out(i);
1906         if (x->is_Proj()) {
1907           y = x;
1908           break;
1909         }
1910       }
1911       if (y == NULL) {
1912         return NULL;
1913       }
1914       // the proj must feed a MergeMem
1915       for (DUIterator_Fast imax, i = y->fast_outs(imax); i < imax; i++) {
1916         x = y->fast_out(i);
1917         if (x->is_MergeMem()) {
1918           mm = x->as_MergeMem();
1919           break;
1920         }
1921       }
1922       if (mm == NULL)
1923         return NULL;
1924     } else {
1925       // ensure the store feeds the existing mergemem;
1926       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
1927         if (st->fast_out(i) == mm) {
1928           y = st;
1929           break;
1930         }
1931       }
1932       if (y == NULL) {
1933         return NULL;
1934       }
1935     }
1936 
1937     MemBarNode *mbar = NULL;
1938     // ensure the merge feeds to the expected type of membar
1939     for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
1940       x = mm->fast_out(i);
1941       if (x->is_MemBar()) {
1942         int opcode = x->Opcode();
1943         if (opcode == Op_MemBarVolatile && st) {
1944           mbar = x->as_MemBar();
1945         } else if (cas && opcode == Op_MemBarCPUOrder) {
1946           MemBarNode *y =  x->as_MemBar();
1947           y = child_membar(y);
1948           if (y != NULL && y->Opcode() == Op_MemBarAcquire) {
1949             mbar = y;
1950           }
1951         }
1952         break;
1953       }
1954     }
1955 
1956     return mbar;
1957   }
1958 
1959   // normal_to_leading
1960   //
1961   // graph traversal helper which detects the normal case Mem feed
1962   // from either a card mark or a trailing membar to a preceding
1963   // release membar (optionally its cpuorder child) i.e. it ensures
1964   // that one or other of the following Mem flow subgraphs is present.
1965   //
1966   //   MemBarRelease
1967   //   MemBarCPUOrder {leading}
1968   //          |  \      . . .
1969   //          |  StoreN/P[mo_release]  . . .
1970   //          |   /
1971   //         MergeMem
1972   //          |
1973   //   MemBarVolatile {card mark or trailing}
1974   //
1975   //   MemBarRelease
1976   //   MemBarCPUOrder {leading}
1977   //      |       \      . . .
1978   //      |     CompareAndSwapX  . . .
1979   //               |
1980   //     . . .    SCMemProj
1981   //           \   |
1982   //      |    MergeMem
1983   //      |        /
1984   //    MemBarCPUOrder
1985   //    MemBarAcquire {trailing}
1986   //
1987   // this predicate checks for the same flow as the previous predicate
1988   // but starting from the bottom rather than the top.
1989   //
  // if the configuration is present returns the cpuorder membar for
1991   // preference or when absent the release membar otherwise NULL.
1992   //
1993   // n.b. the input membar is expected to be a MemBarVolatile but
1994   // need not be a card mark membar.
1995 
1996   MemBarNode *normal_to_leading(const MemBarNode *barrier)
1997   {
1998     // input must be a volatile membar
1999     assert((barrier->Opcode() == Op_MemBarVolatile ||
2000             barrier->Opcode() == Op_MemBarAcquire),
2001            "expecting a volatile or an acquire membar");
2002     Node *x;
2003     bool is_cas = barrier->Opcode() == Op_MemBarAcquire;
2004 
2005     // if we have an acquire membar then it must be fed via a CPUOrder
2006     // membar
2007 
2008     if (is_cas) {
2009       // skip to parent barrier which must be a cpuorder
2010       x = parent_membar(barrier);
2011       if (x->Opcode() != Op_MemBarCPUOrder)
2012         return NULL;
2013     } else {
2014       // start from the supplied barrier
2015       x = (Node *)barrier;
2016     }
2017 
2018     // the Mem feed to the membar should be a merge
2019     x = x ->in(TypeFunc::Memory);
2020     if (!x->is_MergeMem())
2021       return NULL;
2022 
2023     MergeMemNode *mm = x->as_MergeMem();
2024 
2025     if (is_cas) {
2026       // the merge should be fed from the CAS via an SCMemProj node
2027       x = NULL;
2028       for (uint idx = 1; idx < mm->req(); idx++) {
2029         if (mm->in(idx)->Opcode() == Op_SCMemProj) {
2030           x = mm->in(idx);
2031           break;
2032         }
2033       }
2034       if (x == NULL) {
2035         return NULL;
2036       }
2037       // check for a CAS feeding this proj
2038       x = x->in(0);
2039       int opcode = x->Opcode();
2040       if (!is_CAS(opcode)) {
2041         return NULL;
2042       }
2043       // the CAS should get its mem feed from the leading membar
2044       x = x->in(MemNode::Memory);
2045     } else {
2046       // the merge should get its Bottom mem feed from the leading membar
2047       x = mm->in(Compile::AliasIdxBot);      
2048     } 
2049 
2050     // ensure this is a non control projection
2051     if (!x->is_Proj() || x->is_CFG()) {
2052       return NULL;
2053     }
2054     // if it is fed by a membar that's the one we want
2055     x = x->in(0);
2056 
2057     if (!x->is_MemBar()) {
2058       return NULL;
2059     }
2060 
2061     MemBarNode *leading = x->as_MemBar();
2062     // reject invalid candidates
2063     if (!leading_membar(leading)) {
2064       return NULL;
2065     }
2066 
2067     // ok, we have a leading membar, now for the sanity clauses
2068 
2069     // the leading membar must feed Mem to a releasing store or CAS
2070     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
2071     StoreNode *st = NULL;
2072     LoadStoreNode *cas = NULL;
2073     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
2074       x = mem->fast_out(i);
2075       if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
2076         // two stores or CASes is one too many
2077         if (st != NULL || cas != NULL) {
2078           return NULL;
2079         }
2080         st = x->as_Store();
2081       } else if (is_CAS(x->Opcode())) {
2082         if (st != NULL || cas != NULL) {
2083           return NULL;
2084         }
2085         cas = x->as_LoadStore();
2086       }
2087     }
2088 
2089     // we should not have both a store and a cas
2090     if (st == NULL & cas == NULL) {
2091       return NULL;
2092     }
2093 
2094     if (st == NULL) {
2095       // nothing more to check
2096       return leading;
2097     } else {
2098       // we should not have a store if we started from an acquire
2099       if (is_cas) {
2100         return NULL;
2101       }
2102 
2103       // the store should feed the merge we used to get here
2104       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
2105         if (st->fast_out(i) == mm) {
2106           return leading;
2107         }
2108       }
2109     }
2110 
2111     return NULL;
2112   }
2113 
2114   // card_mark_to_trailing
2115   //
2116   // graph traversal helper which detects extra, non-normal Mem feed
2117   // from a card mark volatile membar to a trailing membar i.e. it
2118   // ensures that one of the following three GC post-write Mem flow
2119   // subgraphs is present.
2120   //
2121   // 1)
2122   //     . . .
2123   //       |
2124   //   MemBarVolatile (card mark)
2125   //      |          |     
2126   //      |        StoreCM
2127   //      |          |
2128   //      |        . . .
2129   //  Bot |  / 
2130   //   MergeMem 
2131   //      |
2132   //      |
2133   //    MemBarVolatile {trailing}
2134   //
2135   // 2)
2136   //   MemBarRelease/CPUOrder (leading)
2137   //    |
2138   //    | 
2139   //    |\       . . .
2140   //    | \        | 
2141   //    |  \  MemBarVolatile (card mark) 
2142   //    |   \   |     |
2143   //     \   \  |   StoreCM    . . .
2144   //      \   \ |
2145   //       \  Phi
2146   //        \ /
2147   //        Phi  . . .
2148   //     Bot |   /
2149   //       MergeMem
2150   //         |
2151   //    MemBarVolatile {trailing}
2152   //
2153   //
2154   // 3)
2155   //   MemBarRelease/CPUOrder (leading)
2156   //    |
2157   //    |\
2158   //    | \
2159   //    |  \      . . .
2160   //    |   \       |
2161   //    |\   \  MemBarVolatile (card mark)
2162   //    | \   \   |     |
2163   //    |  \   \  |   StoreCM    . . .
2164   //    |   \   \ |
2165   //     \   \  Phi
2166   //      \   \ /  
2167   //       \  Phi
2168   //        \ /
2169   //        Phi  . . .
2170   //     Bot |   /
2171   //       MergeMem
2172   //         |
2173   //         |
2174   //    MemBarVolatile {trailing}
2175   //
2176   // configuration 1 is only valid if UseConcMarkSweepGC &&
2177   // UseCondCardMark
2178   //
2179   // configurations 2 and 3 are only valid if UseG1GC.
2180   //
2181   // if a valid configuration is present returns the trailing membar
2182   // otherwise NULL.
2183   //
2184   // n.b. the supplied membar is expected to be a card mark
2185   // MemBarVolatile i.e. the caller must ensure the input node has the
2186   // correct operand and feeds Mem to a StoreCM node
2187 
2188   MemBarNode *card_mark_to_trailing(const MemBarNode *barrier)
2189   {
2190     // input must be a card mark volatile membar
2191     assert(is_card_mark_membar(barrier), "expecting a card mark membar");
2192 
2193     Node *feed = barrier->proj_out(TypeFunc::Memory);
2194     Node *x;
2195     MergeMemNode *mm = NULL;
2196 
2197     const int MAX_PHIS = 3;     // max phis we will search through
2198     int phicount = 0;           // current search count
2199 
2200     bool retry_feed = true;
2201     while (retry_feed) {
2202       // see if we have a direct MergeMem feed
2203       for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
2204         x = feed->fast_out(i);
2205         // the correct Phi will be merging a Bot memory slice
2206         if (x->is_MergeMem()) {
2207           mm = x->as_MergeMem();
2208           break;
2209         }
2210       }
2211       if (mm) {
2212         retry_feed = false;
2213       } else if (UseG1GC & phicount++ < MAX_PHIS) {
2214         // the barrier may feed indirectly via one or two Phi nodes
2215         PhiNode *phi = NULL;
2216         for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
2217           x = feed->fast_out(i);
2218           // the correct Phi will be merging a Bot memory slice
2219           if (x->is_Phi() && x->adr_type() == TypePtr::BOTTOM) {
2220             phi = x->as_Phi();
2221             break;
2222           }
2223         }
2224         if (!phi) {
2225           return NULL;
2226         }
2227         // look for another merge below this phi
2228         feed = phi;
2229       } else {
2230         // couldn't find a merge
2231         return NULL;
2232       }
2233     }
2234 
2235     // sanity check this feed turns up as the expected slice
2236     assert(mm->as_MergeMem()->in(Compile::AliasIdxBot) == feed, "expecting membar to feed AliasIdxBot slice to Merge");
2237 
2238     MemBarNode *trailing = NULL;
2239     // be sure we have a trailing membar the merge
2240     for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
2241       x = mm->fast_out(i);
2242       if (x->is_MemBar() && x->Opcode() == Op_MemBarVolatile) {
2243         trailing = x->as_MemBar();
2244         break;
2245       }
2246     }
2247 
2248     return trailing;
2249   }
2250 
2251   // trailing_to_card_mark
2252   //
2253   // graph traversal helper which detects extra, non-normal Mem feed
2254   // from a trailing volatile membar to a preceding card mark volatile
2255   // membar i.e. it identifies whether one of the three possible extra
2256   // GC post-write Mem flow subgraphs is present
2257   //
2258   // this predicate checks for the same flow as the previous predicate
2259   // but starting from the bottom rather than the top.
2260   //
2261   // if the configuration is present returns the card mark membar
2262   // otherwise NULL
2263   //
2264   // n.b. the supplied membar is expected to be a trailing
2265   // MemBarVolatile i.e. the caller must ensure the input node has the
2266   // correct opcode
2267 
  // walk backwards from a trailing (non card mark) volatile membar,
  // through the Bot slice of its MergeMem and (for G1) through up to
  // MAX_PHIS Phi nodes, to the card mark membar; returns the card
  // mark membar on success, NULL otherwise.
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing)
  {
    assert(trailing->Opcode() == Op_MemBarVolatile,
           "expecting a volatile membar");
    assert(!is_card_mark_membar(trailing),
           "not expecting a card mark membar");

    // the Mem feed to the membar should be a merge
    Node *x = trailing->in(TypeFunc::Memory);
    if (!x->is_MergeMem()) {
      return NULL;
    }

    MergeMemNode *mm = x->as_MergeMem();

    x = mm->in(Compile::AliasIdxBot);
    // with G1 we may possibly see a Phi or two before we see a Memory
    // Proj from the card mark membar

    const int MAX_PHIS = 3;     // max phis we will search through
    int phicount = 0;           // current search count

    // if the Bot slice is already a Proj we can go straight to the
    // membar check below; otherwise unwind Phis first
    bool retry_feed = !x->is_Proj();

    while (retry_feed) {
      if (UseG1GC && x->is_Phi() && phicount++ < MAX_PHIS) {
        // examine the inputs of this Phi looking for either the
        // card mark membar's Memory Proj or a further Phi plus a
        // Proj from the leading membar
        PhiNode *phi = x->as_Phi();
        ProjNode *proj = NULL;
        PhiNode *nextphi = NULL;
        bool found_leading = false;
        for (uint i = 1; i < phi->req(); i++) {
          x = phi->in(i);
          if (x->is_Phi()) {
            nextphi = x->as_Phi();
          } else if (x->is_Proj()) {
            int opcode = x->in(0)->Opcode();
            if (opcode == Op_MemBarVolatile) {
              proj = x->as_Proj();
            } else if (opcode == Op_MemBarRelease ||
                       opcode == Op_MemBarCPUOrder) {
              // probably a leading membar
              found_leading = true;
            }
          }
        }
        // if we found a correct looking proj then retry from there
        // otherwise we must see a leading and a phi or else this is
        // the wrong config
        if (proj != NULL) {
          x = proj;
          retry_feed = false;
        } else if (found_leading && nextphi != NULL) {
          // retry from this phi to check phi2
          x = nextphi;
        } else {
          // not what we were looking for
          return NULL;
        }
      } else {
        return NULL;
      }
    }
    // the proj has to come from the card mark membar
    x = x->in(0);
    if (!x->is_MemBar()) {
      return NULL;
    }

    MemBarNode *card_mark_membar = x->as_MemBar();

    if (!is_card_mark_membar(card_mark_membar)) {
      return NULL;
    }

    return card_mark_membar;
  }
2344 
2345   // trailing_to_leading
2346   //
2347   // graph traversal helper which checks the Mem flow up the graph
2348   // from a (non-card mark) trailing membar attempting to locate and
2349   // return an associated leading membar. it first looks for a
2350   // subgraph in the normal configuration (relying on helper
2351   // normal_to_leading). failing that it then looks for one of the
2352   // possible post-write card mark subgraphs linking the trailing node
2353   // to a the card mark membar (relying on helper
2354   // trailing_to_card_mark), and then checks that the card mark membar
2355   // is fed by a leading membar (once again relying on auxiliary
2356   // predicate normal_to_leading).
2357   //
  // if the configuration is valid returns the cpuorder membar for
2359   // preference or when absent the release membar otherwise NULL.
2360   //
2361   // n.b. the input membar is expected to be either a volatile or
2362   // acquire membar but in the former case must *not* be a card mark
2363   // membar.
2364 
2365   MemBarNode *trailing_to_leading(const MemBarNode *trailing)
2366   {
2367     assert((trailing->Opcode() == Op_MemBarAcquire ||
2368             trailing->Opcode() == Op_MemBarVolatile),
2369            "expecting an acquire or volatile membar");
2370     assert((trailing->Opcode() != Op_MemBarVolatile ||
2371             !is_card_mark_membar(trailing)),
2372            "not expecting a card mark membar");
2373 
2374     MemBarNode *leading = normal_to_leading(trailing);
2375 
2376     if (leading) {
2377       return leading;
2378     }
2379 
2380     // nothing more to do if this is an acquire
2381     if (trailing->Opcode() == Op_MemBarAcquire) {
2382       return NULL;
2383     }
2384 
2385     MemBarNode *card_mark_membar = trailing_to_card_mark(trailing);
2386 
2387     if (!card_mark_membar) {
2388       return NULL;
2389     }
2390 
2391     return normal_to_leading(card_mark_membar);
2392   }
2393 
2394   // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
2395 
2396 bool unnecessary_acquire(const Node *barrier)
2397 {
2398   assert(barrier->is_MemBar(), "expecting a membar");
2399 
2400   if (UseBarriersForVolatile) {
2401     // we need to plant a dmb
2402     return false;
2403   }
2404 
2405   // a volatile read derived from bytecode (or also from an inlined
2406   // SHA field read via LibraryCallKit::load_field_from_object)
2407   // manifests as a LoadX[mo_acquire] followed by an acquire membar
2408   // with a bogus read dependency on it's preceding load. so in those
2409   // cases we will find the load node at the PARMS offset of the
2410   // acquire membar.  n.b. there may be an intervening DecodeN node.
2411   //
2412   // a volatile load derived from an inlined unsafe field access
2413   // manifests as a cpuorder membar with Ctl and Mem projections
2414   // feeding both an acquire membar and a LoadX[mo_acquire]. The
2415   // acquire then feeds another cpuorder membar via Ctl and Mem
2416   // projections. The load has no output dependency on these trailing
2417   // membars because subsequent nodes inserted into the graph take
2418   // their control feed from the final membar cpuorder meaning they
2419   // are all ordered after the load.
2420 
2421   Node *x = barrier->lookup(TypeFunc::Parms);
2422   if (x) {
2423     // we are starting from an acquire and it has a fake dependency
2424     //
2425     // need to check for
2426     //
2427     //   LoadX[mo_acquire]
2428     //   {  |1   }
2429     //   {DecodeN}
2430     //      |Parms
2431     //   MemBarAcquire*
2432     //
2433     // where * tags node we were passed
2434     // and |k means input k
2435     if (x->is_DecodeNarrowPtr()) {
2436       x = x->in(1);
2437     }
2438 
2439     return (x->is_Load() && x->as_Load()->is_acquire());
2440   }
2441   
2442   // now check for an unsafe volatile get
2443 
2444   // need to check for
2445   //
2446   //   MemBarCPUOrder
2447   //        ||       \\
2448   //   MemBarAcquire* LoadX[mo_acquire]
2449   //        ||
2450   //   MemBarCPUOrder
2451   //
2452   // where * tags node we were passed
2453   // and || or \\ are Ctl+Mem feeds via intermediate Proj Nodes
2454 
2455   // check for a parent MemBarCPUOrder
2456   ProjNode *ctl;
2457   ProjNode *mem;
2458   MemBarNode *parent = parent_membar(barrier);
2459   if (!parent || parent->Opcode() != Op_MemBarCPUOrder)
2460     return false;
2461   ctl = parent->proj_out(TypeFunc::Control);
2462   mem = parent->proj_out(TypeFunc::Memory);
2463   if (!ctl || !mem) {
2464     return false;
2465   }
2466   // ensure the proj nodes both feed a LoadX[mo_acquire]
2467   LoadNode *ld = NULL;
2468   for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
2469     x = ctl->fast_out(i);
2470     // if we see a load we keep hold of it and stop searching
2471     if (x->is_Load()) {
2472       ld = x->as_Load();
2473       break;
2474     }
2475   }
2476   // it must be an acquiring load
2477   if (ld && ld->is_acquire()) {
2478 
2479     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
2480       x = mem->fast_out(i);
2481       // if we see the same load we drop it and stop searching
2482       if (x == ld) {
2483         ld = NULL;
2484         break;
2485       }
2486     }
2487     // we must have dropped the load
2488     if (ld == NULL) {
2489       // check for a child cpuorder membar
2490       MemBarNode *child  = child_membar(barrier->as_MemBar());
2491       if (child && child->Opcode() == Op_MemBarCPUOrder)
2492         return true;
2493     }
2494   }
2495 
  // final option for an unnecessary membar is that it is a trailing
  // node belonging to a CAS
2498 
2499   MemBarNode *leading = trailing_to_leading(barrier->as_MemBar());
2500 
2501   return leading != NULL;
2502 }
2503 
2504 bool needs_acquiring_load(const Node *n)
2505 {
2506   assert(n->is_Load(), "expecting a load");
2507   if (UseBarriersForVolatile) {
2508     // we use a normal load and a dmb
2509     return false;
2510   }
2511 
2512   LoadNode *ld = n->as_Load();
2513 
2514   if (!ld->is_acquire()) {
2515     return false;
2516   }
2517 
2518   // check if this load is feeding an acquire membar
2519   //
2520   //   LoadX[mo_acquire]
2521   //   {  |1   }
2522   //   {DecodeN}
2523   //      |Parms
2524   //   MemBarAcquire*
2525   //
2526   // where * tags node we were passed
2527   // and |k means input k
2528 
2529   Node *start = ld;
2530   Node *mbacq = NULL;
2531 
2532   // if we hit a DecodeNarrowPtr we reset the start node and restart
2533   // the search through the outputs
2534  restart:
2535 
2536   for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
2537     Node *x = start->fast_out(i);
2538     if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
2539       mbacq = x;
2540     } else if (!mbacq &&
2541                (x->is_DecodeNarrowPtr() ||
2542                 (x->is_Mach() && x->Opcode() == Op_DecodeN))) {
2543       start = x;
2544       goto restart;
2545     }
2546   }
2547 
2548   if (mbacq) {
2549     return true;
2550   }
2551 
2552   // now check for an unsafe volatile get
2553 
2554   // check if Ctl and Proj feed comes from a MemBarCPUOrder
2555   //
2556   //     MemBarCPUOrder
2557   //        ||       \\
2558   //   MemBarAcquire* LoadX[mo_acquire]
2559   //        ||
2560   //   MemBarCPUOrder
2561 
2562   MemBarNode *membar;
2563 
2564   membar = parent_membar(ld);
2565 
2566   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
2567     return false;
2568   }
2569 
2570   // ensure that there is a CPUOrder->Acquire->CPUOrder membar chain
2571 
2572   membar = child_membar(membar);
2573 
2574   if (!membar || !membar->Opcode() == Op_MemBarAcquire) {
2575     return false;
2576   }
2577 
2578   membar = child_membar(membar);
2579   
2580   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
2581     return false;
2582   }
2583 
2584   return true;
2585 }
2586 
2587 bool unnecessary_release(const Node *n)
2588 {
2589   assert((n->is_MemBar() &&
2590           n->Opcode() == Op_MemBarRelease),
2591          "expecting a release membar");
2592 
2593   if (UseBarriersForVolatile) {
2594     // we need to plant a dmb
2595     return false;
2596   }
2597 
2598   // if there is a dependent CPUOrder barrier then use that as the
2599   // leading
2600 
2601   MemBarNode *barrier = n->as_MemBar();
2602   // check for an intervening cpuorder membar
2603   MemBarNode *b = child_membar(barrier);
2604   if (b && b->Opcode() == Op_MemBarCPUOrder) {
2605     // ok, so start the check from the dependent cpuorder barrier
2606     barrier = b;
2607   }
2608 
2609   // must start with a normal feed
2610   MemBarNode *child_barrier = leading_to_normal(barrier);
2611 
2612   if (!child_barrier) {
2613     return false;
2614   }
2615 
2616   if (!is_card_mark_membar(child_barrier)) {
2617     // this is the trailing membar and we are done
2618     return true;
2619   }
2620 
2621   // must be sure this card mark feeds a trailing membar
2622   MemBarNode *trailing = card_mark_to_trailing(child_barrier);
2623   return (trailing != NULL);
2624 }
2625 
2626 bool unnecessary_volatile(const Node *n)
2627 {
2628   // assert n->is_MemBar();
2629   if (UseBarriersForVolatile) {
2630     // we need to plant a dmb
2631     return false;
2632   }
2633 
2634   MemBarNode *mbvol = n->as_MemBar();
2635 
2636   // first we check if this is part of a card mark. if so then we have
2637   // to generate a StoreLoad barrier
2638   
2639   if (is_card_mark_membar(mbvol)) {
2640       return false;
2641   }
2642 
2643   // ok, if it's not a card mark then we still need to check if it is
2644   // a trailing membar of a volatile put hgraph.
2645 
2646   return (trailing_to_leading(mbvol) != NULL);
2647 }
2648 
2649 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
2650 
2651 bool needs_releasing_store(const Node *n)
2652 {
2653   // assert n->is_Store();
2654   if (UseBarriersForVolatile) {
2655     // we use a normal store and dmb combination
2656     return false;
2657   }
2658 
2659   StoreNode *st = n->as_Store();
2660 
2661   // the store must be marked as releasing
2662   if (!st->is_release()) {
2663     return false;
2664   }
2665 
2666   // the store must be fed by a membar
2667 
2668   Node *x = st->lookup(StoreNode::Memory);
2669 
2670   if (! x || !x->is_Proj()) {
2671     return false;
2672   }
2673 
2674   ProjNode *proj = x->as_Proj();
2675 
2676   x = proj->lookup(0);
2677 
2678   if (!x || !x->is_MemBar()) {
2679     return false;
2680   }
2681 
2682   MemBarNode *barrier = x->as_MemBar();
2683 
2684   // if the barrier is a release membar or a cpuorder mmebar fed by a
2685   // release membar then we need to check whether that forms part of a
2686   // volatile put graph.
2687 
2688   // reject invalid candidates
2689   if (!leading_membar(barrier)) {
2690     return false;
2691   }
2692 
2693   // does this lead a normal subgraph?
2694   MemBarNode *mbvol = leading_to_normal(barrier);
2695 
2696   if (!mbvol) {
2697     return false;
2698   }
2699 
2700   // all done unless this is a card mark
2701   if (!is_card_mark_membar(mbvol)) {
2702     return true;
2703   }
2704   
2705   // we found a card mark -- just make sure we have a trailing barrier
2706 
2707   return (card_mark_to_trailing(mbvol) != NULL);
2708 }
2709 
2710 // predicate controlling translation of CAS
2711 //
2712 // returns true if CAS needs to use an acquiring load otherwise false
2713 
// returns true: a CAS is always translated with an acquiring load
// (when barriers are not in use); the debug build additionally
// verifies that the CAS sits inside the expected membar graph
bool needs_acquiring_load_exclusive(const Node *n)
{
  assert(is_CAS(n->Opcode()), "expecting a compare and swap");
  if (UseBarriersForVolatile) {
    return false;
  }

  // CAS nodes only ought to turn up in inlined unsafe CAS operations
#ifdef ASSERT
  LoadStoreNode *st = n->as_LoadStore();

  // the store must be fed by a membar

  Node *x = st->lookup(StoreNode::Memory);

  assert (x && x->is_Proj(), "CAS not fed by memory proj!");

  ProjNode *proj = x->as_Proj();

  x = proj->lookup(0);

  assert (x && x->is_MemBar(), "CAS not fed by membar!");

  MemBarNode *barrier = x->as_MemBar();

  // the barrier must be a cpuorder membar fed by a release membar

  assert(barrier->Opcode() == Op_MemBarCPUOrder,
         "CAS not fed by cpuorder membar!");
      
  MemBarNode *b = parent_membar(barrier);
  assert ((b != NULL && b->Opcode() == Op_MemBarRelease),
          "CAS not fed by cpuorder+release membar pair!");

  // does this lead a normal subgraph?
  MemBarNode *mbar = leading_to_normal(barrier);

  assert(mbar != NULL, "CAS not embedded in normal graph!");

  assert(mbar->Opcode() == Op_MemBarAcquire, "trailing membar should be an acquire");
#endif // ASSERT
  // so we can just return true here
  return true;
}
2758 
// predicate controlling translation of StoreCM
//
// returns true if the StoreStore barrier (dmb ishst) normally planted
// before the card write can be elided, otherwise false
2763 
2764 bool unnecessary_storestore(const Node *storecm)
2765 {
2766   assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");
2767 
2768   // we only ever need to generate a dmb ishst between an object put
2769   // and the associated card mark when we are using CMS without
2770   // conditional card marking
2771 
2772   if (!UseConcMarkSweepGC || UseCondCardMark) {
2773     return true;
2774   }
2775 
2776   // if we are implementing volatile puts using barriers then the
2777   // object put as an str so we must insert the dmb ishst
2778 
2779   if (UseBarriersForVolatile) {
2780     return false;
2781   }
2782 
2783   // we can omit the dmb ishst if this StoreCM is part of a volatile
2784   // put because in thta case the put will be implemented by stlr
2785   //
2786   // we need to check for a normal subgraph feeding this StoreCM.
2787   // that means the StoreCM must be fed Memory from a leading membar,
2788   // either a MemBarRelease or its dependent MemBarCPUOrder, and the
2789   // leading membar must be part of a normal subgraph
2790 
2791   Node *x = storecm->in(StoreNode::Memory);
2792 
2793   if (!x->is_Proj()) {
2794     return false;
2795   }
2796 
2797   x = x->in(0);
2798 
2799   if (!x->is_MemBar()) {
2800     return false;
2801   }
2802 
2803   MemBarNode *leading = x->as_MemBar();
2804 
2805   // reject invalid candidates
2806   if (!leading_membar(leading)) {
2807     return false;
2808   }
2809 
2810   // we can omit the StoreStore if it is the head of a normal subgraph
2811   return (leading_to_normal(leading) != NULL);
2812 }
2813 
2814 
2815 #define __ _masm.
2816 
// advance declarations for helper functions to convert register
// indices to register objects
2819 
2820 // the ad file has to provide implementations of certain methods
2821 // expected by the generic code
2822 //
2823 // REQUIRED FUNCTIONALITY
2824 
2825 //=============================================================================
2826 
2827 // !!!!! Special hack to get all types of calls to specify the byte offset
2828 //       from the start of the call to the point where the return address
2829 //       will point.
2830 
2831 int MachCallStaticJavaNode::ret_addr_offset()
2832 {
2833   // call should be a simple bl
2834   // unless this is a method handle invoke in which case it is
2835   // mov(rfp, sp), bl, mov(sp, rfp)
2836   int off = 4;
2837   if (_method_handle_invoke) {
2838     off += 4;
2839   }
2840   return off;
2841 }
2842 
// byte offset from the start of a dynamic call to its return address:
// four 4-byte instructions
int MachCallDynamicJavaNode::ret_addr_offset()
{
  return 16; // movz, movk, movk, bl
}
2847 
2848 int MachCallRuntimeNode::ret_addr_offset() {
2849   // for generated stubs the call will be
2850   //   bl(addr)
2851   // for real runtime callouts it will be six instructions
2852   // see aarch64_enc_java_to_runtime
2853   //   adr(rscratch2, retaddr)
2854   //   lea(rscratch1, RuntimeAddress(addr)
2855   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
2856   //   blrt rscratch1
2857   CodeBlob *cb = CodeCache::find_blob(_entry_point);
2858   if (cb) {
2859     return MacroAssembler::far_branch_size();
2860   } else {
2861     return 6 * NativeInstruction::instruction_size;
2862   }
2863 }
2864 
2865 // Indicate if the safepoint node needs the polling page as an input
2866 
2867 // the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
2869 // instruction itself. so we cannot plant a mov of the safepoint poll
2870 // address followed by a load. setting this to true means the mov is
2871 // scheduled as a prior instruction. that's better for scheduling
2872 // anyway.
2873 
bool SafePointNode::needs_polling_address_input()
{
  // see the note above: the poll page address must be materialized by
  // a separate, earlier instruction, so the safepoint takes it as an
  // explicit input
  return true;
}
2878 
2879 //=============================================================================
2880 
#ifndef PRODUCT
// print the pseudo-assembly for a breakpoint node
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif
2886 
// emit a breakpoint as a single brk #0 instruction
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  __ brk(0);
}
2891 
// size in bytes of the emitted breakpoint; use the generic computation
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
2895 
2896 //=============================================================================
2897 
#ifndef PRODUCT
  // print the pseudo-assembly for a nop pad
  // n.b. _count is the number of nop instructions (see emit/size
  // below) while the label says "bytes" -- TODO confirm intent
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif
2903 
  // emit _count nop instructions
  void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
    MacroAssembler _masm(&cbuf);
    for (int i = 0; i < _count; i++) { 
      __ nop();
    }
  }
2910 
  // total byte size of the pad: _count instructions
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
2914 
2915 //=============================================================================
2916 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
2917 
// constants are addressed absolutely on aarch64, so the table base
// carries no bias
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}
2921 
2922 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
// never called because requires_postalloc_expand() returns false
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
2926 
// the constant base materializes to no code at all
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}
2930 
// zero bytes, matching the empty encoding in emit
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}
2934 
#ifndef PRODUCT
// print a placeholder for the (empty) constant base encoding
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
2940 
#ifndef PRODUCT
// print the pseudo-assembly for the method prolog; the three cases
// below mirror the frame layout choices made when the prolog is
// emitted
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  // frame size in bytes
  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  if (framesize == 0) {
    // Is this even possible?
    st->print("stp  lr, rfp, [sp, #%d]!", -(2 * wordSize)); 
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    // frame fits a 9-bit scaled immediate: single sub then stp
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
  } else {
    // large frame: push the pair then drop sp via a scratch register
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize)); 
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
#endif
2963 
// emit the method prolog: stack bang, frame construction, simulator
// notification and constant table base setup
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  long framesize = ((long)C->frame_slots()) << LogBytesPerInt;
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  if (C->need_stack_bang(framesize))
    __ generate_stack_overflow_check(framesize);

  __ build_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  // the frame is fully laid out at this point
  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
2998 
// byte size of the emitted prolog
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}
3004 
// number of relocatable values in the prolog: none
int MachPrologNode::reloc() const
{
  return 0;
}
3009 
3010 //=============================================================================
3011 
#ifndef PRODUCT
// print the pseudo-assembly for the method epilog: frame teardown
// plus (for method compilations) the return safepoint poll
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  // frame size in bytes
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    // frame fits a 9-bit scaled immediate: ldp then a single add
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    // large frame: pop sp via a scratch register then ldp the pair
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #" INTPTR_FORMAT "\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
#endif
3037 
// emit the method epilog: tear down the frame, notify the simulator
// if required and plant the return safepoint poll for method
// compilations
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  // frame size in bytes
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  if (do_polling() && C->is_method_compilation()) {
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
3053 
// byte size of the emitted epilog
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}
3058 
int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}
3063 
// use the default pipeline class for scheduling
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
3067 
// This method seems to be obsolete. It is declared in machnode.hpp
// and defined in all *.ad files, but it is never called. Should we
// get rid of it?
int MachEpilogNode::safepoint_offset() const {
  assert(do_polling(), "no return for this epilog node");
  // presumably the byte offset of the poll within the epilog --
  // TODO confirm; the value is never consumed
  return 4;
}
3075 
3076 //=============================================================================
3077 
3078 // Figure out which register class each belongs in: rc_int, rc_float or
3079 // rc_stack.
3080 enum RC { rc_bad, rc_int, rc_float, rc_stack };
3081 
3082 static enum RC rc_class(OptoReg::Name reg) {
3083 
3084   if (reg == OptoReg::Bad) {
3085     return rc_bad;
3086   }
3087 
3088   // we have 30 int registers * 2 halves
3089   // (rscratch1 and rscratch2 are omitted)
3090 
3091   if (reg < 60) {
3092     return rc_int;
3093   }
3094 
3095   // we have 32 float register * 2 halves
3096   if (reg < 60 + 128) {
3097     return rc_float;
3098   }
3099 
3100   // Between float regs & stack is the flags regs.
3101   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
3102 
3103   return rc_stack;
3104 }
3105 
3106 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
3107   Compile* C = ra_->C;
3108 
3109   // Get registers to move.
3110   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
3111   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
3112   OptoReg::Name dst_hi = ra_->get_reg_second(this);
3113   OptoReg::Name dst_lo = ra_->get_reg_first(this);
3114 
3115   enum RC src_hi_rc = rc_class(src_hi);
3116   enum RC src_lo_rc = rc_class(src_lo);
3117   enum RC dst_hi_rc = rc_class(dst_hi);
3118   enum RC dst_lo_rc = rc_class(dst_lo);
3119 
3120   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
3121 
3122   if (src_hi != OptoReg::Bad) {
3123     assert((src_lo&1)==0 && src_lo+1==src_hi &&
3124            (dst_lo&1)==0 && dst_lo+1==dst_hi,
3125            "expected aligned-adjacent pairs");
3126   }
3127 
3128   if (src_lo == dst_lo && src_hi == dst_hi) {
3129     return 0;            // Self copy, no move.
3130   }
3131 
3132   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
3133               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
3134   int src_offset = ra_->reg2offset(src_lo);
3135   int dst_offset = ra_->reg2offset(dst_lo);
3136 
3137   if (bottom_type()->isa_vect() != NULL) {
3138     uint ireg = ideal_reg();
3139     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
3140     if (cbuf) {
3141       MacroAssembler _masm(cbuf);
3142       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
3143       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
3144         // stack->stack
3145         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
3146         if (ireg == Op_VecD) {
3147           __ unspill(rscratch1, true, src_offset);
3148           __ spill(rscratch1, true, dst_offset);
3149         } else {
3150           __ spill_copy128(src_offset, dst_offset);
3151         }
3152       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
3153         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3154                ireg == Op_VecD ? __ T8B : __ T16B,
3155                as_FloatRegister(Matcher::_regEncode[src_lo]));
3156       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
3157         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3158                        ireg == Op_VecD ? __ D : __ Q,
3159                        ra_->reg2offset(dst_lo));
3160       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
3161         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3162                        ireg == Op_VecD ? __ D : __ Q,
3163                        ra_->reg2offset(src_lo));
3164       } else {
3165         ShouldNotReachHere();
3166       }
3167     }
3168   } else if (cbuf) {
3169     MacroAssembler _masm(cbuf);
3170     switch (src_lo_rc) {
3171     case rc_int:
3172       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
3173         if (is64) {
3174             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
3175                    as_Register(Matcher::_regEncode[src_lo]));
3176         } else {
3177             MacroAssembler _masm(cbuf);
3178             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
3179                     as_Register(Matcher::_regEncode[src_lo]));
3180         }
3181       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
3182         if (is64) {
3183             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3184                      as_Register(Matcher::_regEncode[src_lo]));
3185         } else {
3186             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3187                      as_Register(Matcher::_regEncode[src_lo]));
3188         }
3189       } else {                    // gpr --> stack spill
3190         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3191         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
3192       }
3193       break;
3194     case rc_float:
3195       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
3196         if (is64) {
3197             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
3198                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3199         } else {
3200             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
3201                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3202         }
3203       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
3204           if (cbuf) {
3205             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3206                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3207         } else {
3208             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3209                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3210         }
3211       } else {                    // fpr --> stack spill
3212         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3213         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3214                  is64 ? __ D : __ S, dst_offset);
3215       }
3216       break;
3217     case rc_stack:
3218       if (dst_lo_rc == rc_int) {  // stack --> gpr load
3219         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
3220       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
3221         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3222                    is64 ? __ D : __ S, src_offset);
3223       } else {                    // stack --> stack copy
3224         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3225         __ unspill(rscratch1, is64, src_offset);
3226         __ spill(rscratch1, is64, dst_offset);
3227       }
3228       break;
3229     default:
3230       assert(false, "bad rc_class for spill");
3231       ShouldNotReachHere();
3232     }
3233   }
3234 
3235   if (st) {
3236     st->print("spill ");
3237     if (src_lo_rc == rc_stack) {
3238       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
3239     } else {
3240       st->print("%s -> ", Matcher::regName[src_lo]);
3241     }
3242     if (dst_lo_rc == rc_stack) {
3243       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
3244     } else {
3245       st->print("%s", Matcher::regName[dst_lo]);
3246     }
3247     if (bottom_type()->isa_vect() != NULL) {
3248       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
3249     } else {
3250       st->print("\t# spill size = %d", is64 ? 64:32);
3251     }
3252   }
3253 
3254   return 0;
3255 
3256 }
3257 
#ifndef PRODUCT
// print the pseudo-assembly for a spill copy; without a register
// allocator only the node indices can be shown
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(NULL, ra_, false, st);
}
#endif
3266 
// emit the spill copy via the shared worker above
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}
3270 
// byte size of the emitted spill copy; use the generic computation
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
3274 
3275 //=============================================================================
3276 
3277 #ifndef PRODUCT
3278 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3279   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
3280   int reg = ra_->get_reg_first(this);
3281   st->print("add %s, rsp, #%d]\t# box lock",
3282             Matcher::regName[reg], offset);
3283 }
3284 #endif
3285 
// emit a box lock: compute the stack slot address into the allocated
// register with a single add
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    // offsets too large for an add immediate are not expected here
    ShouldNotReachHere();
  }
}
3298 
// a box lock is always a single 4-byte add instruction
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  return 4;
}
3303 
3304 //=============================================================================
3305 
3306 #ifndef PRODUCT
3307 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
3308 {
3309   st->print_cr("# MachUEPNode");
3310   if (UseCompressedClassPointers) {
3311     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
3312     if (Universe::narrow_klass_shift() != 0) {
3313       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
3314     }
3315   } else {
3316    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
3317   }
3318   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
3319   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
3320 }
3321 #endif
3322 
// emit the unverified entry point: compare the receiver's klass
// against the inline cache and jump to the ic miss stub on mismatch
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
3336 
// byte size of the unverified entry point; use the generic computation
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
3341 
3342 // REQUIRED EMIT CODE
3343 
3344 //=============================================================================
3345 
3346 // Emit exception handler code.
// emit the exception handler stub: a far jump to the exception blob.
// returns the offset of the handler within the stub section, or 0 on
// code cache exhaustion
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    // stub allocation failed: record the failure and bail out
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
3365 
3366 // Emit deopt handler code.
// emit the deoptimization handler stub. returns the offset of the
// handler within the stub section, or 0 on code cache exhaustion
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    // stub allocation failed: record the failure and bail out
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // capture the current pc in lr before jumping to the unpack blob --
  // presumably so the blob can identify the deopt site; TODO confirm
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
3386 
3387 // REQUIRED MATCHER CODE
3388 
3389 //=============================================================================
3390 
3391 const bool Matcher::match_rule_supported(int opcode) {
3392 
3393   // TODO 
3394   // identify extra cases that we might want to provide match rules for
3395   // e.g. Op_StrEquals and other intrinsics
3396   if (!has_match_rule(opcode)) {
3397     return false;
3398   }
3399 
3400   return true;  // Per default match rules are supported.
3401 }
3402 
// not used on aarch64 (no x87-style fpu stack)
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
3408 
3409 // Is this branch offset short enough that a short branch can be used?
3410 //
3411 // NOTE: If the platform does not provide any short branch variants, then
3412 //       this method should return false for offset 0.
3413 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
3414   // The passed offset is relative to address of the branch.
3415 
3416   return (-32768 <= offset && offset < 32768);
3417 }
3418 
// true when materializing a 64-bit constant is cheap enough to prefer
// a single (StoreL ConL) over two (StoreI ConI)
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}
3424 
// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
3429 
3430 // Vector width in bytes.
3431 const int Matcher::vector_width_in_bytes(BasicType bt) {
3432   int size = MIN2(16,(int)MaxVectorSize);
3433   // Minimum 2 values in vector
3434   if (size < 2*type2aelembytes(bt)) size = 0;
3435   // But never < 4
3436   if (size < 4) size = 0;
3437   return size;
3438 }
3439 
// Limits on vector size (number of elements) loaded into vector.
// Maximum element count: full vector width divided by the element size.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
3444 const int Matcher::min_vector_size(const BasicType bt) {
3445 //  For the moment limit the vector size to 8 bytes
3446     int size = 8 / type2aelembytes(bt);
3447     if (size < 2) size = 2;
3448     return size;
3449 }
3450 
3451 // Vector ideal reg.
3452 const int Matcher::vector_ideal_reg(int len) {
3453   switch(len) {
3454     case  8: return Op_VecD;
3455     case 16: return Op_VecX;
3456   }
3457   ShouldNotReachHere();
3458   return 0;
3459 }
3460 
// Ideal register class for a vector shift count: always a full
// 128-bit vector register, regardless of the operand size.
const int Matcher::vector_shift_count_ideal_reg(int size) {
  return Op_VecX;
}

// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  return false;
}

// Misaligned vector loads/stores are allowed unless the AlignVector
// flag explicitly demands aligned vector accesses.
const bool Matcher::misaligned_vectors_ok() {
  return !AlignVector; // can be changed by flag
}
3474 
// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Threshold size for cleararray.
const int Matcher::init_array_short_size = 4 * BytesPerLong;

// Use conditional move (CMOVL)
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}

// Cost of a floating-point conditional move.
const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?  True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;
3503 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with 
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the 
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Only profitable when decoding is a pure base+offset (no shift).
  return Universe::narrow_oop_shift() == 0;
}

bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}
3523 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;

// Not needed on AArch64: implicit null checks require no fixup here,
// so this must never be called (it aborts via Unimplemented()).
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}

// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return true; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
3555 
3556 // Return whether or not this register is ever used as an argument.
3557 // This function is used on startup to build the trampoline stubs in
3558 // generateOptoStub.  Registers not mentioned will be killed by the VM
3559 // call in the trampoline, and arguments in those registers not be
3560 // available to the callee.
3561 bool Matcher::can_be_java_arg(int reg)
3562 {
3563   return
3564     reg ==  R0_num || reg == R0_H_num ||
3565     reg ==  R1_num || reg == R1_H_num ||
3566     reg ==  R2_num || reg == R2_H_num ||
3567     reg ==  R3_num || reg == R3_H_num ||
3568     reg ==  R4_num || reg == R4_H_num ||
3569     reg ==  R5_num || reg == R5_H_num ||
3570     reg ==  R6_num || reg == R6_H_num ||
3571     reg ==  R7_num || reg == R7_H_num ||
3572     reg ==  V0_num || reg == V0_H_num ||
3573     reg ==  V1_num || reg == V1_H_num ||
3574     reg ==  V2_num || reg == V2_H_num ||
3575     reg ==  V3_num || reg == V3_H_num ||
3576     reg ==  V4_num || reg == V4_H_num ||
3577     reg ==  V5_num || reg == V5_H_num ||
3578     reg ==  V6_num || reg == V6_H_num ||
3579     reg ==  V7_num || reg == V7_H_num;
3580 }
3581 
// A register is spillable as an argument exactly when it can carry a
// Java argument.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}

// Never emit inline assembler for long division by a constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
3590 
// Register for DIVI projection of divmodI.
// None of the div/mod projection masks are ever requested on AArch64;
// each would abort via ShouldNotReachHere.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Method handle invokes preserve the caller SP in the frame pointer.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
3617 
3618 // helper for encoding java_to_runtime calls on sim
3619 //
3620 // this is needed to compute the extra arguments required when
3621 // planting a call to the simulator blrt instruction. the TypeFunc
3622 // can be queried to identify the counts for integral, and floating
3623 // arguments and the return type
3624 
// Count the integer (gpcnt) and floating-point (fpcnt) arguments of a
// runtime call and classify its return type (rtype) for the
// simulator's blrt encoding.
static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
{
  int gps = 0;
  int fps = 0;
  const TypeTuple *domain = tf->domain();
  int max = domain->cnt();
  for (int i = TypeFunc::Parms; i < max; i++) {
    const Type *t = domain->field_at(i);
    switch(t->basic_type()) {
    case T_FLOAT:
    case T_DOUBLE:
      fps++;
      // NOTE(review): no break here -- a float/double argument falls
      // through and is counted in gps as well, and the second (HALF)
      // slot of long/double arguments lands in the default arm too.
      // This may be what the simulator's blrt convention expects, but
      // it looks suspicious; confirm before relying on these counts.
    default:
      gps++;
    }
  }
  gpcnt = gps;
  fpcnt = fps;
  // Map the return type onto the MacroAssembler's return classes.
  BasicType rt = tf->return_type();
  switch (rt) {
  case T_VOID:
    rtype = MacroAssembler::ret_type_void;
    break;
  default:
    rtype = MacroAssembler::ret_type_integral;
    break;
  case T_FLOAT:
    rtype = MacroAssembler::ret_type_float;
    break;
  case T_DOUBLE:
    rtype = MacroAssembler::ret_type_double;
    break;
  }
}
3659 
// Emit a volatile load/store INSN of REG at [BASE].  Volatile accesses
// support only a bare base register, so any index, scale or
// displacement is rejected by the guarantees.  SCRATCH is accepted but
// currently unused.  The macro declares a local MacroAssembler over
// cbuf, so it can appear at most once per enclosing scope.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                              \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }

// Member-function-pointer types for the integer, floating-point and
// SIMD flavours of the MacroAssembler load/store emitters; used by the
// loadStore helpers below.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
3673 
3674   // Used for all non-volatile memory accesses.  The use of
3675   // $mem->opcode() to discover whether this pattern uses sign-extended
3676   // offsets is something of a kludge.
3677   static void loadStore(MacroAssembler masm, mem_insn insn,
3678                          Register reg, int opcode,
3679                          Register base, int index, int size, int disp)
3680   {
3681     Address::extend scale;
3682 
3683     // Hooboy, this is fugly.  We need a way to communicate to the
3684     // encoder that the index needs to be sign extended, so we have to
3685     // enumerate all the cases.
3686     switch (opcode) {
3687     case INDINDEXSCALEDOFFSETI2L:
3688     case INDINDEXSCALEDI2L:
3689     case INDINDEXSCALEDOFFSETI2LN:
3690     case INDINDEXSCALEDI2LN:
3691     case INDINDEXOFFSETI2L:
3692     case INDINDEXOFFSETI2LN:
3693       scale = Address::sxtw(size);
3694       break;
3695     default:
3696       scale = Address::lsl(size);
3697     }
3698 
3699     if (index == -1) {
3700       (masm.*insn)(reg, Address(base, disp));
3701     } else {
3702       if (disp == 0) {
3703         (masm.*insn)(reg, Address(base, as_Register(index), scale));
3704       } else {
3705         masm.lea(rscratch1, Address(base, disp));
3706         (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
3707       }
3708     }
3709   }
3710 
  // Float/double flavour of loadStore (see the integer variant above).
  // NOTE(review): unlike the integer variant, this switch does not list
  // INDINDEXOFFSETI2L/INDINDEXOFFSETI2LN among the sign-extended
  // cases -- confirm whether FP memory operands can ever match those
  // addressing modes.
  static void loadStore(MacroAssembler masm, mem_float_insn insn,
                         FloatRegister reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDOFFSETI2L:
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDOFFSETI2LN:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // [base, #disp]
      (masm.*insn)(reg, Address(base, disp));
    } else {
      if (disp == 0) {
        // [base, index, extend]
        (masm.*insn)(reg, Address(base, as_Register(index), scale));
      } else {
        // Index plus displacement: fold the displacement into a
        // scratch base register first.
        masm.lea(rscratch1, Address(base, disp));
        (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
      }
    }
  }
3739 
3740   static void loadStore(MacroAssembler masm, mem_vector_insn insn,
3741                          FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
3742                          int opcode, Register base, int index, int size, int disp)
3743   {
3744     if (index == -1) {
3745       (masm.*insn)(reg, T, Address(base, disp));
3746     } else {
3747       assert(disp == 0, "unsupported address mode");
3748       (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
3749     }
3750   }
3751 
3752 %}
3753 
3754 
3755 
3756 //----------ENCODING BLOCK-----------------------------------------------------
3757 // This block specifies the encoding classes used by the compiler to
3758 // output byte streams.  Encoding classes are parameterized macros
3759 // used by Machine Instruction Nodes in order to generate the bit
3760 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  There are currently four
// supported interfaces: REG_INTER, CONST_INTER, MEMORY_INTER, and
3763 // COND_INTER.  REG_INTER causes an operand to generate a function
3764 // which returns its register number when queried.  CONST_INTER causes
3765 // an operand to generate a function which returns the value of the
3766 // constant when queried.  MEMORY_INTER causes an operand to generate
3767 // four functions which return the Base Register, the Index Register,
3768 // the Scale Value, and the Offset Value of the operand when queried.
3769 // COND_INTER causes an operand to generate six functions which return
3770 // the encoding code (ie - encoding bits for the instruction)
3771 // associated with each basic boolean condition for a conditional
3772 // instruction.
3773 //
3774 // Instructions specify two basic values for encoding.  Again, a
3775 // function is available to check if the constant displacement is an
3776 // oop. They use the ins_encode keyword to specify their encoding
3777 // classes (which must be a sequence of enc_class names, and their
3778 // parameters, specified in the encoding block), and they use the
3779 // opcode keyword to specify, in order, their primary, secondary, and
3780 // tertiary opcode.  Only the opcode sections which a particular
3781 // instruction needs for encoding need to be specified.
3782 encode %{
3783   // Build emit functions for each basic byte or larger field in the
3784   // intel encoding scheme (opcode, rm, sib, immediate), and call them
3785   // from C++ code in the enc_class source block.  Emit functions will
3786   // live in the main source block for now.  In future, we can
3787   // generalize this by adding a syntax that specifies the sizes of
3788   // fields in an order, so that the adlc can build the emit functions
3789   // automagically
3790 
  // catch all for unimplemented encodings
  // Emits a runtime "unimplemented" trap so that any instruct rule
  // lacking a real encoding fails loudly instead of silently.
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");    
  %}
3796 
  // BEGIN Non-volatile memory access

  // Each load encoding below forwards its destination register and the
  // decomposed memory operand (opcode, base, index, scale, disp) to a
  // loadStore helper, which selects the addressing mode.  Some names
  // are deliberately reused with different operand signatures
  // (iRegI vs iRegL destinations).

  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Floating-point loads.

  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // SIMD vector loads, distinguished by SIMD_RegVariant (S/D/Q).

  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
3900 
  // Store encodings: same pattern as the loads above.  The *0 variants
  // store the zero register (zr) instead of taking a source operand.

  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Zero byte store preceded by a StoreStore barrier.
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Floating-point stores.

  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // SIMD vector stores, distinguished by SIMD_RegVariant (S/D/Q).

  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
3993 
3994   // END Non-volatile memory access
3995 
3996   // this encoding writes the address of the first instruction in the
3997   // call sequence for the runtime call into the anchor pc slot. this
3998   // address allows the runtime to i) locate the code buffer for the
3999   // caller (any address in the buffer would do) and ii) find the oop
4000   // map associated with the call (has to address the instruction
4001   // following the call). note that we have to store the address which
4002   // follows the actual call.
4003   // 
4004   // the offset from the current pc can be computed by considering
4005   // what gets generated between this point up to and including the
4006   // call. it looks like this
4007   //
4008   //   movz xscratch1 0xnnnn        <-- current pc is here
4009   //   movk xscratch1 0xnnnn
4010   //   movk xscratch1 0xnnnn
4011   //   str xscratch1, [xthread,#anchor_pc_off]
4012   //   mov xscratch2, sp
4013   //   str xscratch2, [xthread,#anchor_sp_off
4014   //   mov x0, x1
4015   //   . . .
4016   //   mov xn-1, xn
4017   //   mov xn, thread            <-- always passed
4018   //   mov xn+1, rfp             <-- optional iff primary == 1
4019   //   movz xscratch1 0xnnnn
4020   //   movk xscratch1 0xnnnn
4021   //   movk xscratch1 0xnnnn
4022   //   blrt xscratch1
4023   //   . . .
4024   //
4025   // where the called routine has n args (including the thread and,
4026   // possibly the stub's caller return address currently in rfp).  we
4027   // can compute n by looking at the number of args passed into the
  // stub. we assert that nargs is <= 8.
4029   //
4030   // so the offset we need to add to the pc (in 32-bit words) is
4031   //   3 +        <-- load 48-bit constant return pc
4032   //   1 +        <-- write anchor pc
4033   //   1 +        <-- copy sp
4034   //   1 +        <-- write anchor sp
4035   //   nargs +    <-- java stub arg count
4036   //   1 +        <-- extra thread arg
4037   // [ 1 + ]      <-- optional ret address of stub caller
4038   //   3 +        <-- load 64 bit call target address
4039   //   1          <-- blrt instruction
4040   //
4041   // i.e we need to add (nargs + 11) * 4 bytes or (nargs + 12) * 4 bytes
4042   //
4043 
  // Store the runtime call's return pc into the current thread's frame
  // anchor (last_Java_pc).  The pc is computed as the current pc plus
  // (nargs + 11) * 4 bytes, matching the instruction sequence described
  // in the comment above.
  enc_class aarch64_enc_save_pc() %{
    Compile* C = ra_->C;
    // Number of stub arguments; one more when $primary indicates the
    // caller's rfp is also passed.
    int nargs = C->tf()->domain()->cnt() - TypeFunc::Parms;
    if ($primary) { nargs++; }
    assert(nargs <= 8, "opto runtime stub has more than 8 args!");
    MacroAssembler _masm(&cbuf);
    address pc = __ pc();
    // Byte offset from here to the instruction following the blrt.
    int call_offset = (nargs + 11) * 4;
    int field_offset = in_bytes(JavaThread::frame_anchor_offset()) +
                       in_bytes(JavaFrameAnchor::last_Java_pc_offset());
    __ lea(rscratch1, InternalAddress(pc + call_offset));
    __ str(rscratch1, Address(rthread, field_offset));
  %}
4057 
  // volatile loads and stores

  // Release stores (stlrb/stlrh/stlrw).  On CPUs with the
  // CPU_DMB_ATOMICS feature flag set, a trailing dmb ISH is emitted
  // after the store.

  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
    if (VM_Version::cpu_cpuFeatures() & VM_Version::CPU_DMB_ATOMICS)
      __ dmb(__ ISH);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
    if (VM_Version::cpu_cpuFeatures() & VM_Version::CPU_DMB_ATOMICS)
      __ dmb(__ ISH);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
    if (VM_Version::cpu_cpuFeatures() & VM_Version::CPU_DMB_ATOMICS)
      __ dmb(__ ISH);
  %}
4080 
4081 
  // Acquire loads.  The signed sub-word variants load with ldarb/ldarh
  // and then sign-extend the result in the destination register.

  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // Acquire loads of float/double: load through an integer scratch
  // register with ldarw/ldar, then move the bits into the FP register.

  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
4156 
4157   enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
4158     Register src_reg = as_Register($src$$reg);
4159     // we sometimes get asked to store the stack pointer into the
4160     // current thread -- we cannot do that directly on AArch64
4161     if (src_reg == r31_sp) {
4162         MacroAssembler _masm(&cbuf);
4163       assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
4164       __ mov(rscratch2, sp);
4165       src_reg = rscratch2;
4166     }
4167     MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4168                  rscratch1, stlr);
4169     if (VM_Version::cpu_cpuFeatures() & VM_Version::CPU_DMB_ATOMICS)
4170       __ dmb(__ ISH);
4171   %}
4172 
  // Volatile float store: move the FP bits to rscratch2 (no FP
  // store-release exists), then stlrw them.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      // Scoped so this _masm does not clash with the one MOV_VOLATILE
      // presumably declares — confirm against the macro definition.
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
    // See the CPU_DMB_ATOMICS note in aarch64_enc_stlr.
    if (VM_Version::cpu_cpuFeatures() & VM_Version::CPU_DMB_ATOMICS)
      __ dmb(__ ISH);
  %}
4184 
  // Volatile double store: move the FP bits to rscratch2, then stlr them.
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      // Scoped to avoid clashing with any _masm declared by MOV_VOLATILE.
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
    // See the CPU_DMB_ATOMICS note in aarch64_enc_stlr.
    if (VM_Version::cpu_cpuFeatures() & VM_Version::CPU_DMB_ATOMICS)
      __ dmb(__ ISH);
  %}
4196 
4197   // synchronized read/update encodings
4198 
  // 64-bit load-acquire exclusive.  ldaxr only accepts a plain base
  // register, so any index/displacement is first folded into rscratch1
  // with lea.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // base + disp + scaled index needs two lea steps.
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
4227 
  // 64-bit store-release exclusive.  The address is folded into
  // rscratch2 (rscratch1 receives the stlxr status), mirroring the
  // addressing fallbacks in aarch64_enc_ldaxr above.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    // stlxr writes 0 to rscratch1 on success, so after this compare the
    // flags report success as EQ.
    __ cmpw(rscratch1, zr);
  %}
4257 
  // 64-bit compare-and-swap with release (but not acquire) semantics.
  // The matcher only presents plain base-register addressing here,
  // hence the guarantee.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true);
  %}
4264 
  // 32-bit compare-and-swap with release (but not acquire) semantics.
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true);
  %}
4271 
4272 
  // Shenandoah GC oop compare-and-swap (release only).  The expected
  // value is copied into tmp because cmpxchg_oop_shenandoah updates its
  // expected-value register and oldval must survive for the caller.
  enc_class aarch64_enc_cmpxchg_oop_shenandoah(memory mem, iRegP oldval, iRegP newval, iRegPNoSp tmp) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
    __ cmpxchg_oop_shenandoah($mem$$Register, tmp, $newval$$Register,
                              Assembler::xword, /*acquire*/ false, /*release*/ true, /*weak*/ false);
  %}
4281 
4282   // The only difference between aarch64_enc_cmpxchg and
4283   // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
4284   // CompareAndSwap sequence to serve as a barrier on acquiring a
4285   // lock.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    // Acquiring variant of aarch64_enc_cmpxchg: acquire == true so the
    // CAS doubles as an acquire barrier (used when taking a lock).
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true);
  %}
4292 
  // 32-bit acquiring variant of aarch64_enc_cmpxchgw.
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true);
  %}
4299 
  // Acquiring variant of aarch64_enc_cmpxchg_oop_shenandoah; tmp again
  // shields oldval from being clobbered.
  enc_class aarch64_enc_cmpxchg_acq_oop_shenandoah(memory mem, iRegP oldval, iRegP newval, iRegPNoSp tmp) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
    __ cmpxchg_oop_shenandoah($mem$$Register, tmp, $newval$$Register,
                              Assembler::xword, /*acquire*/ true, /*release*/ true, /*weak*/ false);
  %}
4308 
4309   // auxiliary used for CompareAndSwapX to set result register
4310   enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
4311     MacroAssembler _masm(&cbuf);
4312     Register res_reg = as_Register($res$$reg);
4313     __ cset(res_reg, Assembler::EQ);
4314   %}
4315 
4316   // prefetch encodings
4317 
  // Read prefetch (PLDL1KEEP) of the resolved memory operand.
  enc_class aarch64_enc_prefetchr(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PLDL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PLDL1KEEP);
      } else {
        // prfm cannot encode base + disp + index at once, so fold the
        // displacement into rscratch1 first.
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PLDL1KEEP);
      }
    }
  %}
4336 
  // Write prefetch (PSTL1KEEP); addressing fallbacks mirror
  // aarch64_enc_prefetchr.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
4355 
  // Non-temporal ("streaming") write prefetch, PSTL1STRM.
  enc_class aarch64_enc_prefetchnta(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1STRM);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1STRM);
        // NOTE(review): this nop has no counterpart in prefetchr /
        // prefetchw — possibly padding so every branch emits the same
        // number of instructions; confirm against the instruct's size
        // attribute before removing.
        __ nop();
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1STRM);
      }
    }
  %}
4375 
  // mov encodings
4377 
4378   enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
4379     MacroAssembler _masm(&cbuf);
4380     u_int32_t con = (u_int32_t)$src$$constant;
4381     Register dst_reg = as_Register($dst$$reg);
4382     if (con == 0) {
4383       __ movw(dst_reg, zr);
4384     } else {
4385       __ movw(dst_reg, con);
4386     }
4387   %}
4388 
4389   enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
4390     MacroAssembler _masm(&cbuf);
4391     Register dst_reg = as_Register($dst$$reg);
4392     u_int64_t con = (u_int64_t)$src$$constant;
4393     if (con == 0) {
4394       __ mov(dst_reg, zr);
4395     } else {
4396       __ mov(dst_reg, con);
4397     }
4398   %}
4399 
  // Materialize a pointer constant, dispatching on its relocation type:
  // oops and metadata get relocatable moves, plain addresses either a
  // direct mov (small, below the first page) or adrp+add.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      // NULL and the marker value 1 are matched by dedicated encodings
      // (aarch64_enc_mov_p0 / _p1 below) and must not reach here.
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          // Large absolute address: page base via adrp, then the
          // low-order offset.
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
4424 
4425   enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
4426     MacroAssembler _masm(&cbuf);
4427     Register dst_reg = as_Register($dst$$reg);
4428     __ mov(dst_reg, zr);
4429   %}
4430 
4431   enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
4432     MacroAssembler _masm(&cbuf);
4433     Register dst_reg = as_Register($dst$$reg);
4434     __ mov(dst_reg, (u_int64_t)1);
4435   %}
4436 
  // Load the polling-page address with a poll_type relocation.  The
  // page is page-aligned, so adrp alone must suffice (offset == 0).
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}
4445 
4446   enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
4447     MacroAssembler _masm(&cbuf);
4448     __ load_byte_map_base($dst$$Register);
4449   %}
4450 
  // Materialize a narrow (compressed) oop constant; NULL is matched by
  // aarch64_enc_mov_n0 below and must not reach here.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}
4463 
4464   enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
4465     MacroAssembler _masm(&cbuf);
4466     Register dst_reg = as_Register($dst$$reg);
4467     __ mov(dst_reg, zr);
4468   %}
4469 
  // Materialize a narrow (compressed) klass constant.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
4482 
4483   // arithmetic encodings
4484 
  // Shared 32-bit add/sub-immediate encoding.  $primary selects the
  // operation (add == 0, subtract == 1); a subtract is folded into an
  // add of the negated constant, and a negative constant into the
  // opposite instruction, so the emitted immediate is always positive.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}
4498 
  // 64-bit counterpart of aarch64_enc_addsubw_imm; same $primary trick.
  // NOTE(review): the long constant is truncated to int32_t —
  // presumably immLAddSub guarantees it fits a 12-bit add/sub
  // immediate; confirm against the operand definition.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
4512 
4513   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
4514     MacroAssembler _masm(&cbuf);
4515    Register dst_reg = as_Register($dst$$reg);
4516    Register src1_reg = as_Register($src1$$reg);
4517    Register src2_reg = as_Register($src2$$reg);
4518     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
4519   %}
4520 
4521   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
4522     MacroAssembler _masm(&cbuf);
4523    Register dst_reg = as_Register($dst$$reg);
4524    Register src1_reg = as_Register($src1$$reg);
4525    Register src2_reg = as_Register($src2$$reg);
4526     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
4527   %}
4528 
4529   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
4530     MacroAssembler _masm(&cbuf);
4531    Register dst_reg = as_Register($dst$$reg);
4532    Register src1_reg = as_Register($src1$$reg);
4533    Register src2_reg = as_Register($src2$$reg);
4534     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
4535   %}
4536 
4537   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
4538     MacroAssembler _masm(&cbuf);
4539    Register dst_reg = as_Register($dst$$reg);
4540    Register src1_reg = as_Register($src1$$reg);
4541    Register src2_reg = as_Register($src2$$reg);
4542     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
4543   %}
4544 
4545   // compare instruction encodings
4546 
4547   enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
4548     MacroAssembler _masm(&cbuf);
4549     Register reg1 = as_Register($src1$$reg);
4550     Register reg2 = as_Register($src2$$reg);
4551     __ cmpw(reg1, reg2);
4552   %}
4553 
  // 32-bit compare against an add/sub-encodable immediate.  A negative
  // constant is compared by adding its negation (subs/adds with zr as
  // destination set only the flags).
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      // NOTE(review): -val would overflow for INT_MIN — presumably
      // immIAddSub restricts the constant to the 12-bit range; confirm.
      __ addsw(zr, reg, -val);
    }
  %}
4564 
4565   enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
4566     MacroAssembler _masm(&cbuf);
4567     Register reg1 = as_Register($src1$$reg);
4568     u_int32_t val = (u_int32_t)$src2$$constant;
4569     __ movw(rscratch1, val);
4570     __ cmpw(reg1, rscratch1);
4571   %}
4572 
4573   enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
4574     MacroAssembler _masm(&cbuf);
4575     Register reg1 = as_Register($src1$$reg);
4576     Register reg2 = as_Register($src2$$reg);
4577     __ cmp(reg1, reg2);
4578   %}
4579 
  // 64-bit compare against a 12-bit (immL12) constant.  Negative values
  // are handled by adding the negation; Long.MIN_VALUE (the only value
  // with val == -val besides 0, and 0 takes the first branch) cannot be
  // negated and is materialized in rscratch1 instead.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}
4594 
4595   enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
4596     MacroAssembler _masm(&cbuf);
4597     Register reg1 = as_Register($src1$$reg);
4598     u_int64_t val = (u_int64_t)$src2$$constant;
4599     __ mov(rscratch1, val);
4600     __ cmp(reg1, rscratch1);
4601   %}
4602 
4603   enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
4604     MacroAssembler _masm(&cbuf);
4605     Register reg1 = as_Register($src1$$reg);
4606     Register reg2 = as_Register($src2$$reg);
4607     __ cmp(reg1, reg2);
4608   %}
4609 
4610   enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
4611     MacroAssembler _masm(&cbuf);
4612     Register reg1 = as_Register($src1$$reg);
4613     Register reg2 = as_Register($src2$$reg);
4614     __ cmpw(reg1, reg2);
4615   %}
4616 
4617   enc_class aarch64_enc_testp(iRegP src) %{
4618     MacroAssembler _masm(&cbuf);
4619     Register reg = as_Register($src$$reg);
4620     __ cmp(reg, zr);
4621   %}
4622 
4623   enc_class aarch64_enc_testn(iRegN src) %{
4624     MacroAssembler _masm(&cbuf);
4625     Register reg = as_Register($src$$reg);
4626     __ cmpw(reg, zr);
4627   %}
4628 
4629   enc_class aarch64_enc_b(label lbl) %{
4630     MacroAssembler _masm(&cbuf);
4631     Label *L = $lbl$$label;
4632     __ b(*L);
4633   %}
4634 
4635   enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
4636     MacroAssembler _masm(&cbuf);
4637     Label *L = $lbl$$label;
4638     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4639   %}
4640 
4641   enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
4642     MacroAssembler _masm(&cbuf);
4643     Label *L = $lbl$$label;
4644     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4645   %}
4646 
  // Slow-path subtype check.  Falls through on success; branches to
  // 'miss' on failure.  With $primary set, result_reg is zeroed on the
  // success path before the miss label, so callers can test it.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
4664 
  // Emit a Java static call through a trampoline, choosing the
  // relocation from the call kind (_method == NULL means a runtime
  // wrapper; _optimized_virtual selects opt_virtual).  Real method
  // calls also get a to-interpreter stub.  trampoline_call and
  // emit_to_interp_stub return NULL when the code cache is exhausted,
  // in which case compilation is bailed out.
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address mark = __ pc();
    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else if (_optimized_virtual) {
      call = __ trampoline_call(Address(addr, relocInfo::opt_virtual_call_type), &cbuf);
    } else {
      call = __ trampoline_call(Address(addr, relocInfo::static_call_type), &cbuf);
    }
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }

    if (_method) {
      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf, mark);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
  %}
4693 
4694   enc_class aarch64_enc_java_handle_call(method meth) %{
4695     MacroAssembler _masm(&cbuf);
4696     relocInfo::relocType reloc;
4697 
4698     // RFP is preserved across all calls, even compiled calls.
4699     // Use it to preserve SP.
4700     __ mov(rfp, sp);
4701 
4702     address mark = __ pc();
4703     address addr = (address)$meth$$method;
4704     address call;
4705     if (!_method) {
4706       // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
4707       call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
4708     } else if (_optimized_virtual) {
4709       call = __ trampoline_call(Address(addr, relocInfo::opt_virtual_call_type), &cbuf);
4710     } else {
4711       call = __ trampoline_call(Address(addr, relocInfo::static_call_type), &cbuf);
4712     }
4713     if (call == NULL) {
4714       ciEnv::current()->record_failure("CodeCache is full"); 
4715       return;
4716     }
4717 
4718     if (_method) {
4719       // Emit stub for static call
4720       address stub = CompiledStaticCall::emit_to_interp_stub(cbuf, mark);
4721       if (stub == NULL) {
4722         ciEnv::current()->record_failure("CodeCache is full"); 
4723         return;
4724       }
4725     }
4726 
4727     // now restore sp
4728     __ mov(sp, rfp);
4729   %}
4730 
4731   enc_class aarch64_enc_java_dynamic_call(method meth) %{
4732     MacroAssembler _masm(&cbuf);
4733     address call = __ ic_call((address)$meth$$method);
4734     if (call == NULL) {
4735       ciEnv::current()->record_failure("CodeCache is full"); 
4736       return;
4737     }
4738   %}
4739 
4740   enc_class aarch64_enc_call_epilog() %{
4741     MacroAssembler _masm(&cbuf);
4742     if (VerifyStackAtCalls) {
4743       // Check that stack depth is unchanged: find majik cookie on stack
4744       __ call_Unimplemented();
4745     }
4746   %}
4747 
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target lives in the code cache: reachable via a trampoline call.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Out-of-codecache target: indirect call with the native calling
      // convention derived from the node's type function.
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
      // (the return address is pushed next to a zero slot, and the two
      // words are popped again after the call).
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      __ add(sp, sp, 2 * wordSize);
    }
  %}
4778 
  // Jump to the rethrow stub; far_jump is used because the stub may be
  // outside the range of a plain branch.
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
4783 
  // Method return through the link register.
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}
4788 
4789   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
4790     MacroAssembler _masm(&cbuf);
4791     Register target_reg = as_Register($jump_target$$reg);
4792     __ br(target_reg);
4793   %}
4794 
4795   enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
4796     MacroAssembler _masm(&cbuf);
4797     Register target_reg = as_Register($jump_target$$reg);
4798     // exception oop should be in r0
4799     // ret addr has been popped into lr
4800     // callee expects it in r3
4801     __ mov(r3, lr);
4802     __ br(target_reg);
4803   %}
4804 
  // Fast-path monitor enter.  On exit the condition flags carry the
  // result: EQ == lock acquired, NE == call the runtime (see the note
  // at 'cont' below).
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      // oop is non-null here, so this comparison leaves NE set,
      // presumably forcing the slow path (cf. the same idiom in
      // fast_unlock) — confirm.
      __ cmp(oop, zr);
      return;
    }
    
    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Handle existing monitor
    if ((EmitSync & 0x02) == 0) {
      // we can use AArch64's bit test and branch here but
      // markoopDesc does not define a bit index just the bit value
      // so assert in case the bit pos changes
#     define __monitor_value_log2 1
      assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
      __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#     undef __monitor_value_log2
    }

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // Load Compare Value application register.

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with mark and if equal exchange scratch1
    // with object markOop.
    if (UseLSE) {
      // LSE: single casal does the compare-and-exchange.
      __ mov(tmp, disp_hdr);
      __ casal(Assembler::xword, tmp, box, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::EQ, cont);
    } else {
      // Exclusive-pair loop; prefetch-for-store first on CPUs that
      // want it before stxr.
      Label retry_load;
      if ((VM_Version::cpu_cpuFeatures() & VM_Version::CPU_STXR_PREFETCH))
        __ prfm(Address(oop), PSTL1STRM);
      __ bind(retry_load);
      __ ldaxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, box, oop);
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, will have now locked it will continue at label cont

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      // The object's monitor m is unlocked iff m->owner == NULL,
      // otherwise m->owner may contain a thread or a stack address.
      //
      // Try to CAS m->owner from NULL to current thread.
      __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
      __ mov(disp_hdr, zr);

      if (UseLSE) {
        __ mov(rscratch1, disp_hdr);
        __ casal(Assembler::xword, rscratch1, rthread, tmp);
        __ cmp(rscratch1, disp_hdr);
      } else {
        Label retry_load, fail;
        if ((VM_Version::cpu_cpuFeatures() & VM_Version::CPU_STXR_PREFETCH))
          __ prfm(Address(tmp), PSTL1STRM);
        __ bind(retry_load);
        __ ldaxr(rscratch1, tmp);
        __ cmp(disp_hdr, rscratch1);
        __ br(Assembler::NE, fail);
        // use stlxr to ensure update is immediately visible
        __ stlxr(rscratch1, rthread, tmp);
        __ cbnzw(rscratch1, retry_load);
        __ bind(fail);
      }

      // Label next;
      // __ cmpxchgptr(/*oldv=*/disp_hdr,
      //               /*newv=*/rthread,
      //               /*addr=*/tmp,
      //               /*tmp=*/rscratch1,
      //               /*succeed*/next,
      //               /*fail*/NULL);
      // __ bind(next);

      // store a non-null value into the box.
      __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

      // PPC port checks the following invariants
      // #ifdef ASSERT
      // bne(flag, cont);
      // We have acquired the monitor, check some invariants.
      // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
      // Invariant 1: _recursions should be 0.
      // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
      // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
      //                        "monitor->_recursions should be 0", -1);
      // Invariant 2: OwnerIsThread shouldn't be 0.
      // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
      //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
      //                           "monitor->OwnerIsThread shouldn't be 0", -1);
      // #endif
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
4959 
4960   // TODO
4961   // reimplement this with custom cmpxchgptr code
4962   // which avoids some of the unnecessary branching
  // Fast-path monitor exit for C2 MonitorExit nodes.
  //
  // Inputs: $object = oop being unlocked, $box = address of the on-stack
  // BasicLock, $tmp/$tmp2 = scratch registers.  rscratch1 is also clobbered.
  // On exit the condition flags carry the result:
  //   EQ = fast-path unlock succeeded, NE = caller must take the slow path.
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      __ cmp(oop, zr); // Oop can't be 0 here => always false.
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont); // EQ => recursive unlock succeeded


    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
      // The monitor bit is tested on the displaced header taken from the
      // box (not on the mark word just loaded into tmp).  NOTE(review):
      // presumably the matching fast_lock encoding stores a value with the
      // monitor bit set into the box for inflated locks -- confirm there.
      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
    }

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      // CAS the mark word from box (our stack lock address) back to the
      // saved displaced header.  With LSE this is a single CAS-with-release;
      // otherwise an ldxr/stlxr retry loop is emitted.
      if (UseLSE) {
        __ mov(tmp, box);
        __ casl(Assembler::xword, tmp, disp_hdr, oop);
        __ cmp(tmp, box); // EQ iff the CAS observed our box address
      } else {
        Label retry_load;
        if ((VM_Version::cpu_cpuFeatures() & VM_Version::CPU_STXR_PREFETCH))
          __ prfm(Address(oop), PSTL1STRM);
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed); // mark no longer points at our box
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        __ cbzw(tmp, cont); // store-exclusive succeeded; flags still EQ from cmp
        __ b(retry_load);
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
      __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
      __ cmp(rscratch1, zr);
      __ br(Assembler::NE, cont); // not owner or recursions != 0: slow path (NE)

      __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
      __ cmp(rscratch1, zr);
      __ br(Assembler::NE, cont); // waiters queued: slow path (NE)
      // need a release store here
      // rscratch1 is zero on this path (checked just above), so this clears
      // _owner with release semantics; flags are EQ from the cmp => success.
      __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ stlr(rscratch1, tmp);
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
5058 
5059 %}
5060 
5061 //----------FRAME--------------------------------------------------------------
5062 // Definition of frame structure and management information.
5063 //
5064 //  S T A C K   L A Y O U T    Allocators stack-slot number
5065 //                             |   (to get allocators register number
5066 //  G  Owned by    |        |  v    add OptoReg::stack0())
5067 //  r   CALLER     |        |
5068 //  o     |        +--------+      pad to even-align allocators stack-slot
5069 //  w     V        |  pad0  |        numbers; owned by CALLER
5070 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
5071 //  h     ^        |   in   |  5
5072 //        |        |  args  |  4   Holes in incoming args owned by SELF
5073 //  |     |        |        |  3
5074 //  |     |        +--------+
5075 //  V     |        | old out|      Empty on Intel, window on Sparc
5076 //        |    old |preserve|      Must be even aligned.
5077 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
5078 //        |        |   in   |  3   area for Intel ret address
5079 //     Owned by    |preserve|      Empty on Sparc.
5080 //       SELF      +--------+
5081 //        |        |  pad2  |  2   pad to align old SP
5082 //        |        +--------+  1
5083 //        |        | locks  |  0
5084 //        |        +--------+----> OptoReg::stack0(), even aligned
5085 //        |        |  pad1  | 11   pad to align new SP
5086 //        |        +--------+
5087 //        |        |        | 10
5088 //        |        | spills |  9   spills
5089 //        V        |        |  8   (pad0 slot for callee)
5090 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
5091 //        ^        |  out   |  7
5092 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
5093 //     Owned by    +--------+
5094 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
5095 //        |    new |preserve|      Must be even-aligned.
5096 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
5097 //        |        |        |
5098 //
5099 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
5100 //         known from SELF's arguments and the Java calling convention.
5101 //         Region 6-7 is determined per call site.
5102 // Note 2: If the calling convention leaves holes in the incoming argument
5103 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
5105 //         incoming area, as the Java calling convention is completely under
5106 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
5108 //         varargs C calling conventions.
5109 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
5110 //         even aligned with pad0 as needed.
5111 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
5112 //           (the latter is true on Intel but is it false on AArch64?)
5113 //         region 6-11 is even aligned; it may be padded out more so that
5114 //         the region from SP to FP meets the minimum stack alignment.
5115 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
5116 //         alignment.  Region 11, pad1, may be dynamically extended so that
5117 //         SP meets the minimum alignment.
5118 
// Frame layout and calling-convention description consumed by the adlc /
// register allocator.  See the stack-layout diagram above for slot naming.
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              round_to((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low-half register for each ideal return type, indexed by ideal_reg.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High half of the pair; OptoReg::Bad for 32-bit-wide values.
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                       // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
5222 
//----------ATTRIBUTES---------------------------------------------------------
// Default attribute values applied to every operand/instruction unless the
// individual definition overrides them.
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
5240 
//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in
// instruction definitions.

//----------Simple Operands----------------------------------------------------

// Integer operands 32 bit
// 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit int no greater than 4
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 255 (low byte mask)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 65535 (low half-word mask)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NOTE(review): named immL_* but matches an int constant (ConI/get_int) --
// presumably used by long-shift/mask rules whose count operand is an int;
// confirm against the instruct rules that reference it before changing.
operand immL_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NOTE(review): named immL_* but matches an int constant (ConI/get_int);
// same caveat as immL_63 above.
operand immL_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 65535 (low half-word mask)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 0xFFFFFFFF (low word mask)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long mask of contiguous low-order one bits (2^k - 1) with the top two
// bits clear.
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Int mask of contiguous low-order one bits (2^k - 1) with the top two
// bits clear.
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5463 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset (long constant) -- for base plus immediate loads
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 4-byte (shift = 2) scaled access
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an 8-byte (shift = 3) scaled access
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 16-byte (shift = 4) scaled access
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for scaled or unscaled immediate loads and stores
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 4-byte (shift = 2) scaled access
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for an 8-byte (shift = 3) scaled access
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 16-byte (shift = 4) scaled access
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5620 
// Integer operands 64 bit
// 64 bit immediate
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long constant equal to the byte offset of last_Java_pc within the
// thread's JavaFrameAnchor (anchor offset + pc-within-anchor offset).

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5707 
// Pointer operands
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate(!UseShenandoahGC && // TODO: Should really check for BS::is_a, see JDK-8193193
    (jbyte*)n->get_ptr() == ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float and Double operands
// Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// constant 'double +0.0'.
operand immD0()
%{
  predicate((n->getd() == 0) &&
            (fpclassify(n->getd()) == FP_ZERO) && (signbit(n->getd()) == 0));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double immediate accepted by the assembler's packed FP immediate
// encoding (operand_valid_for_float_immediate).
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// constant 'float +0.0'.
operand immF0()
%{
  predicate((n->getf() == 0) &&
            (fpclassify(n->getf()) == FP_ZERO) && (signbit(n->getf()) == 0));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float immediate accepted by the assembler's packed FP immediate
// encoding (operand_valid_for_float_immediate).
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5881 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
// NOTE(review): no op_cost attribute here, unlike the sibling operands --
// the op_attrib default applies; confirm this is intentional.
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6142 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register, D form (64 bit)
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register, X form (128 bit)
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register pinned to V0
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register pinned to V1
operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register pinned to V2
operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register pinned to V3
operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6222 
6223 // Flags register, used as output of signed compare instructions
6224 
6225 // note that on AArch64 we also use this register as the output for
6226 // for floating point compare instructions (CmpF CmpD). this ensures
6227 // that ordered inequality tests use GT, GE, LT or LE none of which
6228 // pass through cases where the result is unordered i.e. one or both
6229 // inputs to the compare is a NaN. this means that the ideal code can
6230 // replace e.g. a GT with an LE and not end up capturing the NaN case
6231 // (where the comparison should always fail). EQ and NE tests are
6232 // always generated in ideal code so that unordered folds into the NE
6233 // case, matching the behaviour of AArch64 NE.
6234 //
6235 // This differs from x86 where the outputs of FP compares use a
6236 // special FP flags registers and where compares based on this
6237 // register are distinguished into ordered inequalities (cmpOpUCF) and
6238 // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
6239 // to explicitly handle the unordered case in branches. x86 also has
6240 // to include extra CMoveX rules to accept a cmpOpUCF input.
6241 
6242 operand rFlagsReg()
6243 %{
6244   constraint(ALLOC_IN_RC(int_flags));
6245   match(RegFlags);
6246 
6247   op_cost(0);
6248   format %{ "RFLAGS" %}
6249   interface(REG_INTER);
6250 %}
6251 
6252 // Flags register, used as output of unsigned compare instructions
6253 operand rFlagsRegU()
6254 %{
6255   constraint(ALLOC_IN_RC(int_flags));
6256   match(RegFlags);
6257 
6258   op_cost(0);
6259   format %{ "RFLAGSU" %}
6260   interface(REG_INTER);
6261 %}
6262 
6263 // Special Registers
6264 
6265 // Method Register
6266 operand inline_cache_RegP(iRegP reg)
6267 %{
6268   constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
6269   match(reg);
6270   match(iRegPNoSp);
6271   op_cost(0);
6272   format %{ %}
6273   interface(REG_INTER);
6274 %}
6275 
6276 operand interpreter_method_oop_RegP(iRegP reg)
6277 %{
6278   constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
6279   match(reg);
6280   match(iRegPNoSp);
6281   op_cost(0);
6282   format %{ %}
6283   interface(REG_INTER);
6284 %}
6285 
6286 // Thread Register
6287 operand thread_RegP(iRegP reg)
6288 %{
6289   constraint(ALLOC_IN_RC(thread_reg)); // link_reg
6290   match(reg);
6291   op_cost(0);
6292   format %{ %}
6293   interface(REG_INTER);
6294 %}
6295 
6296 operand lr_RegP(iRegP reg)
6297 %{
6298   constraint(ALLOC_IN_RC(lr_reg)); // link_reg
6299   match(reg);
6300   op_cost(0);
6301   format %{ %}
6302   interface(REG_INTER);
6303 %}
6304 
6305 //----------Memory Operands----------------------------------------------------
6306 
// Simple register-indirect addressing: [$reg] with no index, scale or
// displacement.
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // 0xffffffff is the ADLC encoding for "no index register"
    scale(0x0);
    disp(0x0);
  %}
%}
6320 
6321 operand indIndexScaledOffsetI(iRegP reg, iRegL lreg, immIScale scale, immIU12 off)
6322 %{
6323   constraint(ALLOC_IN_RC(ptr_reg));
6324   match(AddP (AddP reg (LShiftL lreg scale)) off);
6325   op_cost(INSN_COST);
6326   format %{ "$reg, $lreg lsl($scale), $off" %}
6327   interface(MEMORY_INTER) %{
6328     base($reg);
6329     index($lreg);
6330     scale($scale);
6331     disp($off);
6332   %}
6333 %}
6334 
6335 operand indIndexScaledOffsetL(iRegP reg, iRegL lreg, immIScale scale, immLU12 off)
6336 %{
6337   constraint(ALLOC_IN_RC(ptr_reg));
6338   match(AddP (AddP reg (LShiftL lreg scale)) off);
6339   op_cost(INSN_COST);
6340   format %{ "$reg, $lreg lsl($scale), $off" %}
6341   interface(MEMORY_INTER) %{
6342     base($reg);
6343     index($lreg);
6344     scale($scale);
6345     disp($off);
6346   %}
6347 %}
6348 
6349 operand indIndexOffsetI2L(iRegP reg, iRegI ireg, immLU12 off)
6350 %{
6351   constraint(ALLOC_IN_RC(ptr_reg));
6352   match(AddP (AddP reg (ConvI2L ireg)) off);
6353   op_cost(INSN_COST);
6354   format %{ "$reg, $ireg, $off I2L" %}
6355   interface(MEMORY_INTER) %{
6356     base($reg);
6357     index($ireg);
6358     scale(0x0);
6359     disp($off);
6360   %}
6361 %}
6362 
6363 operand indIndexScaledOffsetI2L(iRegP reg, iRegI ireg, immIScale scale, immLU12 off)
6364 %{
6365   constraint(ALLOC_IN_RC(ptr_reg));
6366   match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
6367   op_cost(INSN_COST);
6368   format %{ "$reg, $ireg sxtw($scale), $off I2L" %}
6369   interface(MEMORY_INTER) %{
6370     base($reg);
6371     index($ireg);
6372     scale($scale);
6373     disp($off);
6374   %}
6375 %}
6376 
6377 operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
6378 %{
6379   constraint(ALLOC_IN_RC(ptr_reg));
6380   match(AddP reg (LShiftL (ConvI2L ireg) scale));
6381   op_cost(0);
6382   format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
6383   interface(MEMORY_INTER) %{
6384     base($reg);
6385     index($ireg);
6386     scale($scale);
6387     disp(0x0);
6388   %}
6389 %}
6390 
6391 operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
6392 %{
6393   constraint(ALLOC_IN_RC(ptr_reg));
6394   match(AddP reg (LShiftL lreg scale));
6395   op_cost(0);
6396   format %{ "$reg, $lreg lsl($scale)" %}
6397   interface(MEMORY_INTER) %{
6398     base($reg);
6399     index($lreg);
6400     scale($scale);
6401     disp(0x0);
6402   %}
6403 %}
6404 
6405 operand indIndex(iRegP reg, iRegL lreg)
6406 %{
6407   constraint(ALLOC_IN_RC(ptr_reg));
6408   match(AddP reg lreg);
6409   op_cost(0);
6410   format %{ "$reg, $lreg" %}
6411   interface(MEMORY_INTER) %{
6412     base($reg);
6413     index($lreg);
6414     scale(0x0);
6415     disp(0x0);
6416   %}
6417 %}
6418 
6419 operand indOffI(iRegP reg, immIOffset off)
6420 %{
6421   constraint(ALLOC_IN_RC(ptr_reg));
6422   match(AddP reg off);
6423   op_cost(0);
6424   format %{ "[$reg, $off]" %}
6425   interface(MEMORY_INTER) %{
6426     base($reg);
6427     index(0xffffffff);
6428     scale(0x0);
6429     disp($off);
6430   %}
6431 %}
6432 
6433 operand indOffI4(iRegP reg, immIOffset4 off)
6434 %{
6435   constraint(ALLOC_IN_RC(ptr_reg));
6436   match(AddP reg off);
6437   op_cost(0);
6438   format %{ "[$reg, $off]" %}
6439   interface(MEMORY_INTER) %{
6440     base($reg);
6441     index(0xffffffff);
6442     scale(0x0);
6443     disp($off);
6444   %}
6445 %}
6446 
6447 operand indOffI8(iRegP reg, immIOffset8 off)
6448 %{
6449   constraint(ALLOC_IN_RC(ptr_reg));
6450   match(AddP reg off);
6451   op_cost(0);
6452   format %{ "[$reg, $off]" %}
6453   interface(MEMORY_INTER) %{
6454     base($reg);
6455     index(0xffffffff);
6456     scale(0x0);
6457     disp($off);
6458   %}
6459 %}
6460 
6461 operand indOffI16(iRegP reg, immIOffset16 off)
6462 %{
6463   constraint(ALLOC_IN_RC(ptr_reg));
6464   match(AddP reg off);
6465   op_cost(0);
6466   format %{ "[$reg, $off]" %}
6467   interface(MEMORY_INTER) %{
6468     base($reg);
6469     index(0xffffffff);
6470     scale(0x0);
6471     disp($off);
6472   %}
6473 %}
6474 
6475 operand indOffL(iRegP reg, immLoffset off)
6476 %{
6477   constraint(ALLOC_IN_RC(ptr_reg));
6478   match(AddP reg off);
6479   op_cost(0);
6480   format %{ "[$reg, $off]" %}
6481   interface(MEMORY_INTER) %{
6482     base($reg);
6483     index(0xffffffff);
6484     scale(0x0);
6485     disp($off);
6486   %}
6487 %}
6488 
6489 operand indOffL4(iRegP reg, immLoffset4 off)
6490 %{
6491   constraint(ALLOC_IN_RC(ptr_reg));
6492   match(AddP reg off);
6493   op_cost(0);
6494   format %{ "[$reg, $off]" %}
6495   interface(MEMORY_INTER) %{
6496     base($reg);
6497     index(0xffffffff);
6498     scale(0x0);
6499     disp($off);
6500   %}
6501 %}
6502 
6503 operand indOffL8(iRegP reg, immLoffset8 off)
6504 %{
6505   constraint(ALLOC_IN_RC(ptr_reg));
6506   match(AddP reg off);
6507   op_cost(0);
6508   format %{ "[$reg, $off]" %}
6509   interface(MEMORY_INTER) %{
6510     base($reg);
6511     index(0xffffffff);
6512     scale(0x0);
6513     disp($off);
6514   %}
6515 %}
6516 
6517 operand indOffL16(iRegP reg, immLoffset16 off)
6518 %{
6519   constraint(ALLOC_IN_RC(ptr_reg));
6520   match(AddP reg off);
6521   op_cost(0);
6522   format %{ "[$reg, $off]" %}
6523   interface(MEMORY_INTER) %{
6524     base($reg);
6525     index(0xffffffff);
6526     scale(0x0);
6527     disp($off);
6528   %}
6529 %}
6530 
6531 operand indirectN(iRegN reg)
6532 %{
6533   predicate(Universe::narrow_oop_shift() == 0);
6534   constraint(ALLOC_IN_RC(ptr_reg));
6535   match(DecodeN reg);
6536   op_cost(0);
6537   format %{ "[$reg]\t# narrow" %}
6538   interface(MEMORY_INTER) %{
6539     base($reg);
6540     index(0xffffffff);
6541     scale(0x0);
6542     disp(0x0);
6543   %}
6544 %}
6545 
// Narrow-oop base + scaled long index + unsigned 12-bit int offset.
// This addressing form needs an extra instruction to materialise, so it
// costs INSN_COST — consistent with the non-narrow indIndexScaledOffsetI
// and with the companion narrow operand indIndexScaledOffsetLN.
operand indIndexScaledOffsetIN(iRegN reg, iRegL lreg, immIScale scale, immIU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}
6560 
6561 operand indIndexScaledOffsetLN(iRegN reg, iRegL lreg, immIScale scale, immLU12 off)
6562 %{
6563   predicate(Universe::narrow_oop_shift() == 0);
6564   constraint(ALLOC_IN_RC(ptr_reg));
6565   match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
6566   op_cost(INSN_COST);
6567   format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
6568   interface(MEMORY_INTER) %{
6569     base($reg);
6570     index($lreg);
6571     scale($scale);
6572     disp($off);
6573   %}
6574 %}
6575 
6576 operand indIndexOffsetI2LN(iRegN reg, iRegI ireg, immLU12 off)
6577 %{
6578   predicate(Universe::narrow_oop_shift() == 0);
6579   constraint(ALLOC_IN_RC(ptr_reg));
6580   match(AddP (AddP (DecodeN reg) (ConvI2L ireg)) off);
6581   op_cost(INSN_COST);
6582   format %{ "$reg, $ireg, $off I2L\t# narrow" %}
6583   interface(MEMORY_INTER) %{
6584     base($reg);
6585     index($ireg);
6586     scale(0x0);
6587     disp($off);
6588   %}
6589 %}
6590 
6591 operand indIndexScaledOffsetI2LN(iRegN reg, iRegI ireg, immIScale scale, immLU12 off)
6592 %{
6593   predicate(Universe::narrow_oop_shift() == 0);
6594   constraint(ALLOC_IN_RC(ptr_reg));
6595   match(AddP (AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale)) off);
6596   op_cost(INSN_COST);
6597   format %{ "$reg, $ireg sxtw($scale), $off I2L\t# narrow" %}
6598   interface(MEMORY_INTER) %{
6599     base($reg);
6600     index($ireg);
6601     scale($scale);
6602     disp($off);
6603   %}
6604 %}
6605 
6606 operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
6607 %{
6608   predicate(Universe::narrow_oop_shift() == 0);
6609   constraint(ALLOC_IN_RC(ptr_reg));
6610   match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
6611   op_cost(0);
6612   format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
6613   interface(MEMORY_INTER) %{
6614     base($reg);
6615     index($ireg);
6616     scale($scale);
6617     disp(0x0);
6618   %}
6619 %}
6620 
6621 operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
6622 %{
6623   predicate(Universe::narrow_oop_shift() == 0);
6624   constraint(ALLOC_IN_RC(ptr_reg));
6625   match(AddP (DecodeN reg) (LShiftL lreg scale));
6626   op_cost(0);
6627   format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
6628   interface(MEMORY_INTER) %{
6629     base($reg);
6630     index($lreg);
6631     scale($scale);
6632     disp(0x0);
6633   %}
6634 %}
6635 
6636 operand indIndexN(iRegN reg, iRegL lreg)
6637 %{
6638   predicate(Universe::narrow_oop_shift() == 0);
6639   constraint(ALLOC_IN_RC(ptr_reg));
6640   match(AddP (DecodeN reg) lreg);
6641   op_cost(0);
6642   format %{ "$reg, $lreg\t# narrow" %}
6643   interface(MEMORY_INTER) %{
6644     base($reg);
6645     index($lreg);
6646     scale(0x0);
6647     disp(0x0);
6648   %}
6649 %}
6650 
6651 operand indOffIN(iRegN reg, immIOffset off)
6652 %{
6653   predicate(Universe::narrow_oop_shift() == 0);
6654   constraint(ALLOC_IN_RC(ptr_reg));
6655   match(AddP (DecodeN reg) off);
6656   op_cost(0);
6657   format %{ "[$reg, $off]\t# narrow" %}
6658   interface(MEMORY_INTER) %{
6659     base($reg);
6660     index(0xffffffff);
6661     scale(0x0);
6662     disp($off);
6663   %}
6664 %}
6665 
6666 operand indOffLN(iRegN reg, immLoffset off)
6667 %{
6668   predicate(Universe::narrow_oop_shift() == 0);
6669   constraint(ALLOC_IN_RC(ptr_reg));
6670   match(AddP (DecodeN reg) off);
6671   op_cost(0);
6672   format %{ "[$reg, $off]\t# narrow" %}
6673   interface(MEMORY_INTER) %{
6674     base($reg);
6675     index(0xffffffff);
6676     scale(0x0);
6677     disp($off);
6678   %}
6679 %}
6680 
6681 
6682 
6683 // AArch64 opto stubs need to write to the pc slot in the thread anchor
6684 operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
6685 %{
6686   constraint(ALLOC_IN_RC(ptr_reg));
6687   match(AddP reg off);
6688   op_cost(0);
6689   format %{ "[$reg, $off]" %}
6690   interface(MEMORY_INTER) %{
6691     base($reg);
6692     index(0xffffffff);
6693     scale(0x0);
6694     disp($off);
6695   %}
6696 %}
6697 
6698 //----------Special Memory Operands--------------------------------------------
6699 // Stack Slot Operand - This operand is used for loading and storing temporary
6700 //                      values on the stack where a match requires a value to
6701 //                      flow through memory.
6702 operand stackSlotP(sRegP reg)
6703 %{
6704   constraint(ALLOC_IN_RC(stack_slots));
6705   op_cost(100);
6706   // No match rule because this operand is only generated in matching
6707   // match(RegP);
6708   format %{ "[$reg]" %}
6709   interface(MEMORY_INTER) %{
6710     base(0x1e);  // RSP
6711     index(0x0);  // No Index
6712     scale(0x0);  // No Scale
6713     disp($reg);  // Stack Offset
6714   %}
6715 %}
6716 
6717 operand stackSlotI(sRegI reg)
6718 %{
6719   constraint(ALLOC_IN_RC(stack_slots));
6720   // No match rule because this operand is only generated in matching
6721   // match(RegI);
6722   format %{ "[$reg]" %}
6723   interface(MEMORY_INTER) %{
6724     base(0x1e);  // RSP
6725     index(0x0);  // No Index
6726     scale(0x0);  // No Scale
6727     disp($reg);  // Stack Offset
6728   %}
6729 %}
6730 
6731 operand stackSlotF(sRegF reg)
6732 %{
6733   constraint(ALLOC_IN_RC(stack_slots));
6734   // No match rule because this operand is only generated in matching
6735   // match(RegF);
6736   format %{ "[$reg]" %}
6737   interface(MEMORY_INTER) %{
6738     base(0x1e);  // RSP
6739     index(0x0);  // No Index
6740     scale(0x0);  // No Scale
6741     disp($reg);  // Stack Offset
6742   %}
6743 %}
6744 
6745 operand stackSlotD(sRegD reg)
6746 %{
6747   constraint(ALLOC_IN_RC(stack_slots));
6748   // No match rule because this operand is only generated in matching
6749   // match(RegD);
6750   format %{ "[$reg]" %}
6751   interface(MEMORY_INTER) %{
6752     base(0x1e);  // RSP
6753     index(0x0);  // No Index
6754     scale(0x0);  // No Scale
6755     disp($reg);  // Stack Offset
6756   %}
6757 %}
6758 
6759 operand stackSlotL(sRegL reg)
6760 %{
6761   constraint(ALLOC_IN_RC(stack_slots));
6762   // No match rule because this operand is only generated in matching
6763   // match(RegL);
6764   format %{ "[$reg]" %}
6765   interface(MEMORY_INTER) %{
6766     base(0x1e);  // RSP
6767     index(0x0);  // No Index
6768     scale(0x0);  // No Scale
6769     disp($reg);  // Stack Offset
6770   %}
6771 %}
6772 
6773 // Operands for expressing Control Flow
6774 // NOTE: Label is a predefined operand which should not be redefined in
6775 //       the AD file. It is generically handled within the ADLC.
6776 
6777 //----------Conditional Branch Operands----------------------------------------
6778 // Comparison Op  - This is the operation of the comparison, and is limited to
6779 //                  the following set of codes:
6780 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
6781 //
6782 // Other attributes of the comparison, such as unsignedness, are specified
6783 // by the comparison instruction that sets a condition code flags register.
6784 // That result is represented by a flags operand whose subtype is appropriate
6785 // to the unsignedness (etc.) of the comparison.
6786 //
6787 // Later, the instruction which matches both the Comparison Op (a Bool) and
6788 // the flags (produced by the Cmp) specifies the coding of the comparison op
6789 // by matching a specific subtype of Bool operand below, such as cmpOpU.
6790 
6791 // used for signed integral comparisons and fp comparisons
6792 
6793 operand cmpOp()
6794 %{
6795   match(Bool);
6796 
6797   format %{ "" %}
6798   interface(COND_INTER) %{
6799     equal(0x0, "eq");
6800     not_equal(0x1, "ne");
6801     less(0xb, "lt");
6802     greater_equal(0xa, "ge");
6803     less_equal(0xd, "le");
6804     greater(0xc, "gt");
6805     overflow(0x6, "vs");
6806     no_overflow(0x7, "vc");
6807   %}
6808 %}
6809 
6810 // used for unsigned integral comparisons
6811 
6812 operand cmpOpU()
6813 %{
6814   match(Bool);
6815 
6816   format %{ "" %}
6817   interface(COND_INTER) %{
6818     equal(0x0, "eq");
6819     not_equal(0x1, "ne");
6820     less(0x3, "lo");
6821     greater_equal(0x2, "hs");
6822     less_equal(0x9, "ls");
6823     greater(0x8, "hi");
6824     overflow(0x6, "vs");
6825     no_overflow(0x7, "vc");
6826   %}
6827 %}
6828 
6829 // Special operand allowing long args to int ops to be truncated for free
6830 
6831 operand iRegL2I(iRegL reg) %{
6832 
6833   op_cost(0);
6834 
6835   match(ConvL2I reg);
6836 
6837   format %{ "l2i($reg)" %}
6838 
6839   interface(REG_INTER)
6840 %}
6841 
6842 opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
6843 opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
6844 opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
6845 
6846 //----------OPERAND CLASSES----------------------------------------------------
6847 // Operand Classes are groups of operands that are used as to simplify
6848 // instruction definitions by not requiring the AD writer to specify
6849 // separate instructions for every form of operand when the
6850 // instruction accepts multiple operand types with the same basic
6851 // encoding and format. The classic case of this is memory operands.
6852 
6853 // memory is used to define read/write location for load/store
6854 // instruction defs. we can turn a memory op into an Address
6855 
6856 opclass memory(indirect, indIndexScaledOffsetI, indIndexScaledOffsetL, indIndexOffsetI2L, indIndexScaledOffsetI2L, indIndexScaled, indIndexScaledI2L, indIndex, indOffI, indOffL,
6857                indirectN, indIndexScaledOffsetIN, indIndexScaledOffsetLN, indIndexOffsetI2LN, indIndexScaledOffsetI2LN, indIndexScaledN, indIndexScaledI2LN, indIndexN, indOffIN, indOffLN);
6858  
6860 
6861 
6862 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
6863 // operations. it allows the src to be either an iRegI or a (ConvL2I
6864 // iRegL). in the latter case the l2i normally planted for a ConvL2I
6865 // can be elided because the 32-bit instruction will just employ the
6866 // lower 32 bits anyway.
6867 //
6868 // n.b. this does not elide all L2I conversions. if the truncated
6869 // value is consumed by more than one operation then the ConvL2I
6870 // cannot be bundled into the consuming nodes so an l2i gets planted
6871 // (actually a movw $dst $src) and the downstream instructions consume
6872 // the result of the l2i as an iRegI input. That's a shame since the
6873 // movw is actually redundant but its not too costly.
6874 
6875 opclass iRegIorL2I(iRegI, iRegL2I);
6876 
6877 //----------PIPELINE-----------------------------------------------------------
6878 // Rules which define the behavior of the target architectures pipeline.
6879 
6880 // For specific pipelines, eg A53, define the stages of that pipeline
6881 //pipe_desc(ISS, EX1, EX2, WR);
6882 #define ISS S0
6883 #define EX1 S1
6884 #define EX2 S2
6885 #define WR  S3
6886 
6887 // Integer ALU reg operation
6888 pipeline %{
6889 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
6902 
6903 // We don't use an actual pipeline model so don't care about resources
6904 // or description. we do use pipeline classes to introduce fixed
6905 // latencies
6906 
6907 //----------RESOURCES----------------------------------------------------------
6908 // Resources are the functional units available to the machine
6909 
6910 resources( INS0, INS1, INS01 = INS0 | INS1,
6911            ALU0, ALU1, ALU = ALU0 | ALU1,
6912            MAC,
6913            DIV,
6914            BRANCH,
6915            LDST,
6916            NEON_FP);
6917 
6918 //----------PIPELINE DESCRIPTION-----------------------------------------------
6919 // Pipeline Description specifies the stages in the machine's pipeline
6920 
6921 // Define the pipeline as a generic 6 stage pipeline
6922 pipe_desc(S0, S1, S2, S3, S4, S5);
6923 
6924 //----------PIPELINE CLASSES---------------------------------------------------
6925 // Pipeline Classes describe the stages in which input and output are
6926 // referenced by the hardware pipeline.
6927 
6928 pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
6929 %{
6930   single_instruction;
6931   src1   : S1(read);
6932   src2   : S2(read);
6933   dst    : S5(write);
6934   INS01  : ISS;
6935   NEON_FP : S5;
6936 %}
6937 
6938 pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
6939 %{
6940   single_instruction;
6941   src1   : S1(read);
6942   src2   : S2(read);
6943   dst    : S5(write);
6944   INS01  : ISS;
6945   NEON_FP : S5;
6946 %}
6947 
6948 pipe_class fp_uop_s(vRegF dst, vRegF src)
6949 %{
6950   single_instruction;
6951   src    : S1(read);
6952   dst    : S5(write);
6953   INS01  : ISS;
6954   NEON_FP : S5;
6955 %}
6956 
6957 pipe_class fp_uop_d(vRegD dst, vRegD src)
6958 %{
6959   single_instruction;
6960   src    : S1(read);
6961   dst    : S5(write);
6962   INS01  : ISS;
6963   NEON_FP : S5;
6964 %}
6965 
6966 pipe_class fp_d2f(vRegF dst, vRegD src)
6967 %{
6968   single_instruction;
6969   src    : S1(read);
6970   dst    : S5(write);
6971   INS01  : ISS;
6972   NEON_FP : S5;
6973 %}
6974 
6975 pipe_class fp_f2d(vRegD dst, vRegF src)
6976 %{
6977   single_instruction;
6978   src    : S1(read);
6979   dst    : S5(write);
6980   INS01  : ISS;
6981   NEON_FP : S5;
6982 %}
6983 
6984 pipe_class fp_f2i(iRegINoSp dst, vRegF src)
6985 %{
6986   single_instruction;
6987   src    : S1(read);
6988   dst    : S5(write);
6989   INS01  : ISS;
6990   NEON_FP : S5;
6991 %}
6992 
6993 pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
6994 %{
6995   single_instruction;
6996   src    : S1(read);
6997   dst    : S5(write);
6998   INS01  : ISS;
6999   NEON_FP : S5;
7000 %}
7001 
7002 pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
7003 %{
7004   single_instruction;
7005   src    : S1(read);
7006   dst    : S5(write);
7007   INS01  : ISS;
7008   NEON_FP : S5;
7009 %}
7010 
7011 pipe_class fp_l2f(vRegF dst, iRegL src)
7012 %{
7013   single_instruction;
7014   src    : S1(read);
7015   dst    : S5(write);
7016   INS01  : ISS;
7017   NEON_FP : S5;
7018 %}
7019 
7020 pipe_class fp_d2i(iRegINoSp dst, vRegD src)
7021 %{
7022   single_instruction;
7023   src    : S1(read);
7024   dst    : S5(write);
7025   INS01  : ISS;
7026   NEON_FP : S5;
7027 %}
7028 
7029 pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
7030 %{
7031   single_instruction;
7032   src    : S1(read);
7033   dst    : S5(write);
7034   INS01  : ISS;
7035   NEON_FP : S5;
7036 %}
7037 
7038 pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
7039 %{
7040   single_instruction;
7041   src    : S1(read);
7042   dst    : S5(write);
7043   INS01  : ISS;
7044   NEON_FP : S5;
7045 %}
7046 
7047 pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
7048 %{
7049   single_instruction;
7050   src    : S1(read);
7051   dst    : S5(write);
7052   INS01  : ISS;
7053   NEON_FP : S5;
7054 %}
7055 
7056 pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
7057 %{
7058   single_instruction;
7059   src1   : S1(read);
7060   src2   : S2(read);
7061   dst    : S5(write);
7062   INS0   : ISS;
7063   NEON_FP : S5;
7064 %}
7065 
7066 pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
7067 %{
7068   single_instruction;
7069   src1   : S1(read);
7070   src2   : S2(read);
7071   dst    : S5(write);
7072   INS0   : ISS;
7073   NEON_FP : S5;
7074 %}
7075 
7076 pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
7077 %{
7078   single_instruction;
7079   cr     : S1(read);
7080   src1   : S1(read);
7081   src2   : S1(read);
7082   dst    : S3(write);
7083   INS01  : ISS;
7084   NEON_FP : S3;
7085 %}
7086 
7087 pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
7088 %{
7089   single_instruction;
7090   cr     : S1(read);
7091   src1   : S1(read);
7092   src2   : S1(read);
7093   dst    : S3(write);
7094   INS01  : ISS;
7095   NEON_FP : S3;
7096 %}
7097 
7098 pipe_class fp_imm_s(vRegF dst)
7099 %{
7100   single_instruction;
7101   dst    : S3(write);
7102   INS01  : ISS;
7103   NEON_FP : S3;
7104 %}
7105 
7106 pipe_class fp_imm_d(vRegD dst)
7107 %{
7108   single_instruction;
7109   dst    : S3(write);
7110   INS01  : ISS;
7111   NEON_FP : S3;
7112 %}
7113 
7114 pipe_class fp_load_constant_s(vRegF dst)
7115 %{
7116   single_instruction;
7117   dst    : S4(write);
7118   INS01  : ISS;
7119   NEON_FP : S4;
7120 %}
7121 
7122 pipe_class fp_load_constant_d(vRegD dst)
7123 %{
7124   single_instruction;
7125   dst    : S4(write);
7126   INS01  : ISS;
7127   NEON_FP : S4;
7128 %}
7129 
7130 pipe_class vmul64(vecD dst, vecD src1, vecD src2)
7131 %{
7132   single_instruction;
7133   dst    : S5(write);
7134   src1   : S1(read);
7135   src2   : S1(read);
7136   INS01  : ISS;
7137   NEON_FP : S5;
7138 %}
7139 
7140 pipe_class vmul128(vecX dst, vecX src1, vecX src2)
7141 %{
7142   single_instruction;
7143   dst    : S5(write);
7144   src1   : S1(read);
7145   src2   : S1(read);
7146   INS0   : ISS;
7147   NEON_FP : S5;
7148 %}
7149 
// 64-bit vector multiply-accumulate: dst is both an accumulator input
// (read in S1) and the result (written in S5).
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);  // accumulator input
  INS01  : ISS;
  NEON_FP : S5;
%}
7160 
7161 pipe_class vmla128(vecX dst, vecX src1, vecX src2)
7162 %{
7163   single_instruction;
7164   dst    : S5(write);
7165   src1   : S1(read);
7166   src2   : S1(read);
7167   dst    : S1(read);
7168   INS0   : ISS;
7169   NEON_FP : S5;
7170 %}
7171 
7172 pipe_class vdop64(vecD dst, vecD src1, vecD src2)
7173 %{
7174   single_instruction;
7175   dst    : S4(write);
7176   src1   : S2(read);
7177   src2   : S2(read);
7178   INS01  : ISS;
7179   NEON_FP : S4;
7180 %}
7181 
7182 pipe_class vdop128(vecX dst, vecX src1, vecX src2)
7183 %{
7184   single_instruction;
7185   dst    : S4(write);
7186   src1   : S2(read);
7187   src2   : S2(read);
7188   INS0   : ISS;
7189   NEON_FP : S4;
7190 %}
7191 
7192 pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
7193 %{
7194   single_instruction;
7195   dst    : S3(write);
7196   src1   : S2(read);
7197   src2   : S2(read);
7198   INS01  : ISS;
7199   NEON_FP : S3;
7200 %}
7201 
7202 pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
7203 %{
7204   single_instruction;
7205   dst    : S3(write);
7206   src1   : S2(read);
7207   src2   : S2(read);
7208   INS0   : ISS;
7209   NEON_FP : S3;
7210 %}
7211 
7212 pipe_class vshift64(vecD dst, vecD src, vecX shift)
7213 %{
7214   single_instruction;
7215   dst    : S3(write);
7216   src    : S1(read);
7217   shift  : S1(read);
7218   INS01  : ISS;
7219   NEON_FP : S3;
7220 %}
7221 
7222 pipe_class vshift128(vecX dst, vecX src, vecX shift)
7223 %{
7224   single_instruction;
7225   dst    : S3(write);
7226   src    : S1(read);
7227   shift  : S1(read);
7228   INS0   : ISS;
7229   NEON_FP : S3;
7230 %}
7231 
7232 pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
7233 %{
7234   single_instruction;
7235   dst    : S3(write);
7236   src    : S1(read);
7237   INS01  : ISS;
7238   NEON_FP : S3;
7239 %}
7240 
7241 pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
7242 %{
7243   single_instruction;
7244   dst    : S3(write);
7245   src    : S1(read);
7246   INS0   : ISS;
7247   NEON_FP : S3;
7248 %}
7249 
7250 pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
7251 %{
7252   single_instruction;
7253   dst    : S5(write);
7254   src1   : S1(read);
7255   src2   : S1(read);
7256   INS01  : ISS;
7257   NEON_FP : S5;
7258 %}
7259 
7260 pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
7261 %{
7262   single_instruction;
7263   dst    : S5(write);
7264   src1   : S1(read);
7265   src2   : S1(read);
7266   INS0   : ISS;
7267   NEON_FP : S5;
7268 %}
7269 
7270 pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
7271 %{
7272   single_instruction;
7273   dst    : S5(write);
7274   src1   : S1(read);
7275   src2   : S1(read);
7276   INS0   : ISS;
7277   NEON_FP : S5;
7278 %}
7279 
7280 pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
7281 %{
7282   single_instruction;
7283   dst    : S5(write);
7284   src1   : S1(read);
7285   src2   : S1(read);
7286   INS0   : ISS;
7287   NEON_FP : S5;
7288 %}
7289 
7290 pipe_class vsqrt_fp128(vecX dst, vecX src)
7291 %{
7292   single_instruction;
7293   dst    : S5(write);
7294   src    : S1(read);
7295   INS0   : ISS;
7296   NEON_FP : S5;
7297 %}
7298 
7299 pipe_class vunop_fp64(vecD dst, vecD src)
7300 %{
7301   single_instruction;
7302   dst    : S5(write);
7303   src    : S1(read);
7304   INS01  : ISS;
7305   NEON_FP : S5;
7306 %}
7307 
7308 pipe_class vunop_fp128(vecX dst, vecX src)
7309 %{
7310   single_instruction;
7311   dst    : S5(write);
7312   src    : S1(read);
7313   INS0   : ISS;
7314   NEON_FP : S5;
7315 %}
7316 
// Duplicate a general register into a 64-bit vector
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a general register into a 128-bit vector
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register into a 64-bit vector
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register into a 128-bit vector
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a double register into a 128-bit vector
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Move immediate into a 64-bit vector (no source operand)
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Move immediate into a 128-bit vector; can only dual issue as instruction 0
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}
7377 
// 64-bit vector load; address consumed at issue, result in S5
pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector load
pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 64-bit vector store; data register read in S2
pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7404 
// 128-bit vector store; data register read in S2
// NOTE(review): source operand type corrected from vecD to vecX so the
// signature matches the other 128-bit classes (cf. vload_reg_mem128).
// pipe_class operand types are descriptive only — instructs bind to the
// class by name via ins_pipe — so this does not change matching.
pipe_class vstore_reg_mem128(vecX src, vmem16 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7413 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  // NOTE(review): ALU is occupied at EX1 although dst is written at EX2
  // (header comment says result in EX2) — confirm against the pipeline model.
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
7511 
//------- Compare operation -------------------------------

// Compare reg-reg; writes only the flags register
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-immediate
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
7538 
//------- Conditional instructions ------------------------

// Conditional no operands (flags only)
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand (second input implicit, e.g. zr)
// Eg.  CSEL    x0, x1, zr, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
7576 
//------- Multiply pipeline operations --------------------

// Multiply reg-reg (32 bit)
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (32 bit)
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long multiply reg-reg (64 bit)
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long multiply accumulate (64 bit)
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
7629 
//------- Divide pipeline operations --------------------

// 32-bit divide
// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// 64-bit divide
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
7655 
//------- Load pipeline operations ------------------------

// Load - prefetch (no destination register)
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg (register-indexed address)
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
7689 
//------- Store pipeline operations -----------------------

// Store - zr, mem (store of the zero register, no data operand)
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg (register-indexed address; both operands are reads)
// Eg. STR      x0, [sp, x1]
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
7723 
//------- Branch pipeline operations ----------------------

// Unconditional branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch (reads flags)
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch (reads a register rather than flags)
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
7752 
//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Empty pipeline class - consumes no resources
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}
7783 
// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;
%}
7816 
7817 %}
7818 //----------INSTRUCTIONS-------------------------------------------------------
7819 //
7820 // match      -- States which machine-independent subtree may be replaced
7821 //               by this instruction.
7822 // ins_cost   -- The estimated cost of this instruction is used by instruction
7823 //               selection to identify a minimum cost tree of machine
7824 //               instructions that matches a tree of machine-independent
7825 //               instructions.
7826 // format     -- A string providing the disassembly for this instruction.
7827 //               The value of an instruction's operand may be inserted
7828 //               by referring to it with a '$' prefix.
7829 // opcode     -- Three instruction opcodes may be provided.  These are referred
7830 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
7832 //               indicate the type of machine instruction, while secondary
7833 //               and tertiary are often used for prefix options or addressing
7834 //               modes.
7835 // ins_encode -- A list of encode classes with parameters. The encode class
7836 //               name must have been defined in an 'enc_class' specification
7837 //               in the encode section of the architecture description.
7838 
7839 // ============================================================================
7840 // Memory (Load/Store) Instructions
7841 
7842 // Load Instructions
7843 
7844 // Load Byte (8 bit signed)
7845 instruct loadB(iRegINoSp dst, memory mem)
7846 %{
7847   match(Set dst (LoadB mem));
7848   predicate(!needs_acquiring_load(n));
7849 
7850   ins_cost(4 * INSN_COST);
7851   format %{ "ldrsbw  $dst, $mem\t# byte" %}
7852 
7853   ins_encode(aarch64_enc_ldrsbw(dst, mem));
7854 
7855   ins_pipe(iload_reg_mem);
7856 %}
7857 
7858 // Load Byte (8 bit signed) into long
7859 instruct loadB2L(iRegLNoSp dst, memory mem)
7860 %{
7861   match(Set dst (ConvI2L (LoadB mem)));
7862   predicate(!needs_acquiring_load(n->in(1)));
7863 
7864   ins_cost(4 * INSN_COST);
7865   format %{ "ldrsb  $dst, $mem\t# byte" %}
7866 
7867   ins_encode(aarch64_enc_ldrsb(dst, mem));
7868 
7869   ins_pipe(iload_reg_mem);
7870 %}
7871 
7872 // Load Byte (8 bit unsigned)
7873 instruct loadUB(iRegINoSp dst, memory mem)
7874 %{
7875   match(Set dst (LoadUB mem));
7876   predicate(!needs_acquiring_load(n));
7877 
7878   ins_cost(4 * INSN_COST);
7879   format %{ "ldrbw  $dst, $mem\t# byte" %}
7880 
7881   ins_encode(aarch64_enc_ldrb(dst, mem));
7882 
7883   ins_pipe(iload_reg_mem);
7884 %}
7885 
7886 // Load Byte (8 bit unsigned) into long
7887 instruct loadUB2L(iRegLNoSp dst, memory mem)
7888 %{
7889   match(Set dst (ConvI2L (LoadUB mem)));
7890   predicate(!needs_acquiring_load(n->in(1)));
7891 
7892   ins_cost(4 * INSN_COST);
7893   format %{ "ldrb  $dst, $mem\t# byte" %}
7894 
7895   ins_encode(aarch64_enc_ldrb(dst, mem));
7896 
7897   ins_pipe(iload_reg_mem);
7898 %}
7899 
7900 // Load Short (16 bit signed)
7901 instruct loadS(iRegINoSp dst, memory mem)
7902 %{
7903   match(Set dst (LoadS mem));
7904   predicate(!needs_acquiring_load(n));
7905 
7906   ins_cost(4 * INSN_COST);
7907   format %{ "ldrshw  $dst, $mem\t# short" %}
7908 
7909   ins_encode(aarch64_enc_ldrshw(dst, mem));
7910 
7911   ins_pipe(iload_reg_mem);
7912 %}
7913 
7914 // Load Short (16 bit signed) into long
7915 instruct loadS2L(iRegLNoSp dst, memory mem)
7916 %{
7917   match(Set dst (ConvI2L (LoadS mem)));
7918   predicate(!needs_acquiring_load(n->in(1)));
7919 
7920   ins_cost(4 * INSN_COST);
7921   format %{ "ldrsh  $dst, $mem\t# short" %}
7922 
7923   ins_encode(aarch64_enc_ldrsh(dst, mem));
7924 
7925   ins_pipe(iload_reg_mem);
7926 %}
7927 
7928 // Load Char (16 bit unsigned)
7929 instruct loadUS(iRegINoSp dst, memory mem)
7930 %{
7931   match(Set dst (LoadUS mem));
7932   predicate(!needs_acquiring_load(n));
7933 
7934   ins_cost(4 * INSN_COST);
7935   format %{ "ldrh  $dst, $mem\t# short" %}
7936 
7937   ins_encode(aarch64_enc_ldrh(dst, mem));
7938 
7939   ins_pipe(iload_reg_mem);
7940 %}
7941 
7942 // Load Short/Char (16 bit unsigned) into long
7943 instruct loadUS2L(iRegLNoSp dst, memory mem)
7944 %{
7945   match(Set dst (ConvI2L (LoadUS mem)));
7946   predicate(!needs_acquiring_load(n->in(1)));
7947 
7948   ins_cost(4 * INSN_COST);
7949   format %{ "ldrh  $dst, $mem\t# short" %}
7950 
7951   ins_encode(aarch64_enc_ldrh(dst, mem));
7952 
7953   ins_pipe(iload_reg_mem);
7954 %}
7955 
7956 // Load Integer (32 bit signed)
7957 instruct loadI(iRegINoSp dst, memory mem)
7958 %{
7959   match(Set dst (LoadI mem));
7960   predicate(!needs_acquiring_load(n));
7961 
7962   ins_cost(4 * INSN_COST);
7963   format %{ "ldrw  $dst, $mem\t# int" %}
7964 
7965   ins_encode(aarch64_enc_ldrw(dst, mem));
7966 
7967   ins_pipe(iload_reg_mem);
7968 %}
7969 
7970 // Load Integer (32 bit signed) into long
7971 instruct loadI2L(iRegLNoSp dst, memory mem)
7972 %{
7973   match(Set dst (ConvI2L (LoadI mem)));
7974   predicate(!needs_acquiring_load(n->in(1)));
7975 
7976   ins_cost(4 * INSN_COST);
7977   format %{ "ldrsw  $dst, $mem\t# int" %}
7978 
7979   ins_encode(aarch64_enc_ldrsw(dst, mem));
7980 
7981   ins_pipe(iload_reg_mem);
7982 %}
7983 
7984 // Load Integer (32 bit unsigned) into long
7985 instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
7986 %{
7987   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
7988   predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));
7989 
7990   ins_cost(4 * INSN_COST);
7991   format %{ "ldrw  $dst, $mem\t# int" %}
7992 
7993   ins_encode(aarch64_enc_ldrw(dst, mem));
7994 
7995   ins_pipe(iload_reg_mem);
7996 %}
7997 
// Load Long (64 bit signed)
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // Disassembly annotation corrected: this is a 64-bit long load, not an int.
  format %{ "ldr  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
8011 
// Load Range (array length; never needs acquire semantics, so no predicate)
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Float
instruct loadF(vRegF dst, memory mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
8108 
8109 
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant
// Costed higher: may expand to a multi-instruction materialization.

instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
8165 
// Load Pointer Constant One
// (Fixed disassembly annotation: this materializes the constant 1,
// not the NULL pointer — the old text was copy-pasted from loadConP0.)

instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
8179 
// Load Poll Page Constant (safepoint polling page address)

instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}

  ins_encode(aarch64_enc_mov_poll_page(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Byte Map Base Constant (card table base)

instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Packed Float Constant
// immFPacked constants are encodable in the fmov immediate field, so no
// constant-table load is needed.

instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}

// Load Float Constant (general case: load from the constant table)

instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}

// Load Packed Double Constant (encodable in the fmov immediate field)

instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
8293 
// Load Double Constant (general case: load from the constant table)

instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  format %{
    // Disassembly annotation corrected: the constant is a double, not a float.
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
8310 
// Store Instructions

// Store CMS card-mark Immediate
// Only matched when the preceding StoreStore barrier is redundant.
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "dmb ishst"
      "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}

// Plain (non-releasing) stores only: each rule's predicate rejects
// stores that need release semantics.

// Store Byte
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Byte Zero (uses zr, no source register needed)
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store Char/Short
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Char/Short Zero
instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}

// Store Integer

instruct storeI(iRegIorL2I src, memory mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Integer Zero
instruct storeimmI0(immI0 zero, memory mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
8424 
// Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Disassembly annotation corrected: this is a 64-bit long store, not an int.
  format %{ "str  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
8438 
// Store Long Zero (64 bit signed; uses zr, no source register needed)
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Disassembly annotation corrected: this is a 64-bit long store, not an int.
  format %{ "str  zr, $mem\t# long" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
8452 
// Store Pointer
// Uses a hand-written encode block (rather than aarch64_enc_str) so it
// can special-case storing the stack pointer.
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode %{
    int opcode = $mem->opcode();
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int size = $mem$$scale;
    int disp = $mem$$disp;
    Register reg = as_Register($src$$reg);

    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      reg = rscratch2;
    }
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDOFFSETI2L:
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDOFFSETI2LN:
    case INDINDEXSCALEDI2LN:
    case INDINDEXOFFSETI2L:
    case INDINDEXOFFSETI2LN:
      // index was produced by an int-to-long conversion: sign-extend it
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    // Build the address: base+disp, base+index, or (when both an index
    // and a displacement are present) lea the displacement into a
    // scratch register first, since str cannot encode all three.
    Address adr;
    if (index == -1) {
      adr = Address(base, disp);
    } else {
      if (disp == 0) {
        adr = Address(base, as_Register(index), scale);
      } else {
        __ lea(rscratch1, Address(base, disp));
        adr = Address(rscratch1, as_Register(index), scale);
      }
    }

    __ str(reg, adr);
  %}

  ins_pipe(istore_reg_mem);
%}
8513 
8514 // Store Pointer
8515 instruct storeimmP0(immP0 zero, memory mem)
8516 %{
8517   match(Set mem (StoreP mem zero));
8518   predicate(!needs_releasing_store(n));
8519 
8520   ins_cost(INSN_COST);
8521   format %{ "str zr, $mem\t# ptr" %}
8522 
8523   ins_encode(aarch64_enc_str0(mem));
8524 
8525   ins_pipe(istore_mem);
8526 %}
8527 
8528 // Store Compressed Pointer
8529 instruct storeN(iRegN src, memory mem)
8530 %{
8531   match(Set mem (StoreN mem src));
8532   predicate(!needs_releasing_store(n));
8533 
8534   ins_cost(INSN_COST);
8535   format %{ "strw  $src, $mem\t# compressed ptr" %}
8536 
8537   ins_encode(aarch64_enc_strw(src, mem));
8538 
8539   ins_pipe(istore_reg_mem);
8540 %}
8541 
// Store of a zero narrow oop.  When both the oop and klass encoding
// bases are NULL, rheapbase holds zero, so storing its low 32 bits
// writes a null compressed pointer without materializing a constant.
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL  &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}
8556 
8557 // Store Float
// Plain single-precision float store (strs from an FP/SIMD register).
instruct storeF(vRegF src, memory mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}
8570 
8571 // TODO
8572 // implement storeImmF0 and storeFImmPacked
8573 
8574 // Store Double
// Plain double-precision float store (strd from an FP/SIMD register).
instruct storeD(vRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
8587 
8588 // Store Compressed Klass Pointer
// Store of a compressed klass pointer; same encoding as a compressed
// oop store (32-bit strw).
instruct storeNKlass(iRegN src, memory mem)
%{
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
8601 
8602 // TODO
8603 // implement storeImmD0 and storeDImmPacked
8604 
8605 // prefetch instructions
8606 // Must be safe to execute with invalid address (cannot fault).
8607 
// Read prefetch: PRFM PLDL1KEEP (prefetch for load, L1, temporal).
instruct prefetchr( memory mem ) %{
  match(PrefetchRead mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PLDL1KEEP\t# Prefetch into level 1 cache read keep" %}

  ins_encode( aarch64_enc_prefetchr(mem) );

  ins_pipe(iload_prefetch);
%}
8618 
// Allocation prefetch: PRFM PSTL1KEEP (prefetch for store, L1, temporal).
instruct prefetchw( memory mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
8629 
// Write prefetch: PRFM PSTL1STRM (prefetch for store, L1, streaming /
// non-temporal) — hence the x86-derived "nta" name.
instruct prefetchnta( memory mem ) %{
  match(PrefetchWrite mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1STRM\t# Prefetch into level 1 cache write streaming" %}

  ins_encode( aarch64_enc_prefetchnta(mem) );

  ins_pipe(iload_prefetch);
%}
8640 
8641 //  ---------------- volatile loads and stores ----------------
8642 
8643 // Load Byte (8 bit signed)
// Volatile load byte: ldarsb = load-acquire, sign-extending byte.
// Volatile accesses only support a plain register-indirect address.
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}
8655 
8656 // Load Byte (8 bit signed) into long
// Volatile load byte widened to long; ldarsb sign-extends to the full
// 64-bit register, so the ConvI2L is folded into the load.
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}
8668 
8669 // Load Byte (8 bit unsigned)
// Volatile load unsigned byte: ldarb zero-extends.
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}
8681 
8682 // Load Byte (8 bit unsigned) into long
// Volatile load unsigned byte widened to long; ldarb's zero-extension
// makes the ConvI2L free.
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}
8694 
8695 // Load Short (16 bit signed)
// Volatile load short: ldarshw = load-acquire halfword, sign-extended
// into a 32-bit register.
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}
8707 
// Volatile load char/unsigned short: ldarhw zero-extends the halfword.
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}
8719 
8720 // Load Short/Char (16 bit unsigned) into long
// Volatile load char/unsigned short widened to long; ldarh's
// zero-extension makes the ConvI2L free.
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
8732 
8733 // Load Short/Char (16 bit signed) into long
8734 instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
8735 %{
8736   match(Set dst (ConvI2L (LoadS mem)));
8737 
8738   ins_cost(VOLATILE_REF_COST);
8739   format %{ "ldarh  $dst, $mem\t# short" %}
8740 
8741   ins_encode(aarch64_enc_ldarsh(dst, mem));
8742 
8743   ins_pipe(pipe_serial);
8744 %}
8745 
8746 // Load Integer (32 bit signed)
// Volatile load int: 32-bit load-acquire (ldarw).
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
8758 
8759 // Load Integer (32 bit unsigned) into long
// Volatile load int zero-extended to long: ldarw clears the upper 32
// bits, so the AndL with the 0xFFFFFFFF mask is folded away.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
8771 
8772 // Load Long (64 bit signed)
8773 instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
8774 %{
8775   match(Set dst (LoadL mem));
8776 
8777   ins_cost(VOLATILE_REF_COST);
8778   format %{ "ldar  $dst, $mem\t# int" %}
8779 
8780   ins_encode(aarch64_enc_ldar(dst, mem));
8781 
8782   ins_pipe(pipe_serial);
8783 %}
8784 
8785 // Load Pointer
// Volatile load of an uncompressed oop/pointer: 64-bit ldar.
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
8797 
8798 // Load Compressed Pointer
// Volatile load of a compressed oop: 32-bit ldarw.
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
8810 
8811 // Load Float
// Volatile load float; encoding routes through an FP-register
// load-acquire sequence (see aarch64_enc_fldars).
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}
8823 
8824 // Load Double
// Volatile load double; encoding routes through an FP-register
// load-acquire sequence (see aarch64_enc_fldard).
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
8836 
8837 // Store Byte
// Volatile store byte: stlrb = store-release byte.
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}
8849 
8850 // Store Char/Short
// Volatile store char/short: stlrh = store-release halfword.
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}
8862 
8863 // Store Integer
8864 
8865 instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
8866 %{
8867   match(Set mem(StoreI mem src));
8868 
8869   ins_cost(VOLATILE_REF_COST);
8870   format %{ "stlrw  $src, $mem\t# int" %}
8871 
8872   ins_encode(aarch64_enc_stlrw(src, mem));
8873 
8874   ins_pipe(pipe_class_memory);
8875 %}
8876 
8877 // Store Long (64 bit signed)
8878 instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
8879 %{
8880   match(Set mem (StoreL mem src));
8881 
8882   ins_cost(VOLATILE_REF_COST);
8883   format %{ "stlr  $src, $mem\t# int" %}
8884 
8885   ins_encode(aarch64_enc_stlr(src, mem));
8886 
8887   ins_pipe(pipe_class_memory);
8888 %}
8889 
8890 // Store Pointer
// Volatile store of an uncompressed oop/pointer: 64-bit stlr.
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
8902 
8903 // Store Compressed Pointer
// Volatile store of a compressed oop: 32-bit stlrw.
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
8915 
8916 // Store Float
// Volatile store float; encoding routes through an FP-register
// store-release sequence (see aarch64_enc_fstlrs).
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}
8928 
8929 // TODO
8930 // implement storeImmF0 and storeFImmPacked
8931 
8932 // Store Double
// Volatile store double; encoding routes through an FP-register
// store-release sequence (see aarch64_enc_fstlrd).
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
8944 
8945 //  ---------------- end of volatile loads and stores ----------------
8946 
8947 // ============================================================================
8948 // BSWAP Instructions
8949 
// Byte-swap a 32-bit value with a single revw.
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8962 
// Byte-swap a 64-bit value with a single rev.
instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8975 
// Byte-swap an unsigned 16-bit value: rev16w swaps bytes within each
// halfword; no extension needed for the unsigned (char) case.
instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8988 
// Byte-swap a signed 16-bit value: rev16w swaps the bytes, then
// sbfmw bits 0..15 sign-extends the result into the 32-bit register.
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
9003 
9004 // ============================================================================
9005 // Zero Count Instructions
9006 
// Integer.numberOfLeadingZeros: single clzw instruction.
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9018 
// Long.numberOfLeadingZeros: single clz instruction (int result).
instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9030 
// Integer.numberOfTrailingZeros: AArch64 has no ctz, so reverse the
// bits (rbitw) and count leading zeros (clzw).
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9044 
// Long.numberOfTrailingZeros: rbit + clz, 64-bit forms.
instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9058 
9059 //---------- Population Count Instructions -------------------------------------
9060 //
9061 
// Integer.bitCount via the SIMD CNT instruction: move the value into
// an FP/SIMD register, count set bits per byte (cnt 8B), sum the byte
// counts (addv), and move the result back.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // movw of src to itself clears the upper 32 bits so the 64-bit
    // vector move only counts the int's own bits; as an int value,
    // src is unchanged.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
9083 
// Integer.bitCount of an in-memory int: load straight into the SIMD
// register (ldrs) and then count/sum as in popCountI.
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    // loadStore handles the full range of memory operand addressing modes.
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
9105 
9106 // Note: Long.bitCount(long) returns an int.
// Note: Long.bitCount(long) returns an int.
// Same SIMD cnt/addv technique as popCountI, on the full 64-bit value.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
9126 
// Long.bitCount of an in-memory long: load straight into the SIMD
// register (ldrd) and then count/sum as in popCountL.
instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    // loadStore handles the full range of memory operand addressing modes.
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
9148 
9149 // ============================================================================
9150 // MemBar Instruction
9151 
// Acquire-style fence: orders the preceding load against all later
// loads and stores (LoadLoad|LoadStore).
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}
9163 
// Elided acquire barrier: when unnecessary_acquire(n) determines the
// preceding ldar already provides the ordering, emit only a comment.
// Zero cost so this rule wins over membar_acquire when applicable.
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}
9177 
// Acquire barrier: LoadLoad|LoadStore membar.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}
9191 
9192 
// Barrier paired with lock acquisition: emits no instructions, only a
// comment — the locking sequence itself supplies the ordering
// (hence "elided" in the format).
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}
9205 
// Release-style fence: orders all earlier loads and stores against the
// following store (LoadStore|StoreStore).
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
9217 
// Elided release barrier: when unnecessary_release(n) determines the
// following stlr already provides the ordering, emit only a comment.
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}
9230 
// Release barrier: LoadStore|StoreStore membar.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release" %}

  ins_encode %{
    __ block_comment("membar_release");
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
9243 
// Store-store barrier only (e.g. after object initialization).
instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
9255 
// Barrier paired with lock release: emits no instructions, only a
// comment — the unlocking sequence itself supplies the ordering.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}
9268 
// Elided full barrier: when unnecessary_volatile(n) determines the
// surrounding acquire/release instructions already give the required
// ordering, emit only a comment.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}
9282 
9283 instruct membar_volatile() %{
9284   match(MemBarVolatile);
9285   ins_cost(VOLATILE_REF_COST*100);
9286 
9287   format %{ "membar_volatile" %}
9288 
9289   ins_encode %{
9290     __ block_comment("membar_volatile");
9291     __ membar(Assembler::StoreLoad);
9292     %}
9293 
9294   ins_pipe(pipe_serial);
9295 %}
9296 
9297 // ============================================================================
9298 // Cast/Convert Instructions
9299 
// Reinterpret a long as a pointer; a register move, skipped entirely
// when the allocator assigned src and dst the same register.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
9314 
// Reinterpret a pointer as a long; a register move, skipped entirely
// when the allocator assigned src and dst the same register.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
9329 
9330 // Convert oop into int for vectors alignment masking
// Truncate a pointer to its low 32 bits (movw) for vector alignment
// masking.
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
9342 
9343 // Convert compressed oop into int for vectors alignment masking
9344 // in case of 32bit oops (heap < 4Gb).
9345 instruct convN2I(iRegINoSp dst, iRegN src)
9346 %{
9347   predicate(Universe::narrow_oop_shift() == 0);
9348   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
9349 
9350   ins_cost(INSN_COST);
9351   format %{ "mov dst, $src\t# compressed ptr -> int" %}
9352   ins_encode %{
9353     __ movw($dst$$Register, $src$$Register);
9354   %}
9355 
9356   ins_pipe(ialu_reg);
9357 %}
9358 
// Shenandoah read barrier: load the forwarding (Brooks) pointer stored
// just before the object, yielding the current copy of the object.
instruct shenandoahRB(iRegPNoSp dst, iRegP src, rFlagsReg cr) %{
  match(Set dst (ShenandoahReadBarrier src));
  format %{ "shenandoah_rb $dst,$src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ ldr(d, Address(s, BrooksPointer::byte_offset()));
  %}
  ins_pipe(pipe_class_memory);
%}
9369 
// Shenandoah write barrier: resolve the forwarding pointer, then call
// the write-barrier stub.  Result is pinned to r0 (iRegP_R0) per the
// stub's calling convention; clobbers the flags.
instruct shenandoahWB(iRegP_R0 dst, iRegP src, rFlagsReg cr) %{
  match(Set dst (ShenandoahWriteBarrier src));
  effect(KILL cr);

  format %{ "shenandoah_wb $dst,$src" %}
  ins_encode %{
    Label done;
    Register s = $src$$Register;
    Register d = $dst$$Register;
    assert(d == r0, "result in r0");
    __ block_comment("Shenandoah write barrier {");
    // We need that first read barrier in order to trigger a SEGV/NPE on incoming NULL.
    // Also, it brings s into d in preparation for the call to shenandoah_write_barrier().
    __ ldr(d, Address(s, BrooksPointer::byte_offset()));
    __ shenandoah_write_barrier(d);
    __ block_comment("} Shenandoah write barrier");
  %}
  ins_pipe(pipe_slow);
%}
9389 
9390 // Convert oop pointer into compressed form
// Compress a possibly-null oop.  The null check inside
// encode_heap_oop uses the flags, hence KILL cr; the not-null variant
// below avoids that.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}
9404 
// Compress an oop statically known to be non-null; no flags effect
// needed since no null check is emitted.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
9415 
// Decompress a possibly-null narrow oop (general case: neither
// provably not-null nor a constant).
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}
9429 
// Decompress a narrow oop known to be non-null (or a constant),
// skipping the null check.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
9443 
9444 // n.b. AArch64 implementations of encode_klass_not_null and
9445 // decode_klass_not_null do not modify the flags register so, unlike
9446 // Intel, we don't kill CR as a side effect here
9447 
// Compress a klass pointer.  Per the comment above, the AArch64
// implementation does not touch the flags, so no KILL cr is declared.
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}
9462 
// Decompress a narrow klass pointer; uses the single-register form of
// decode_klass_not_null when src and dst were allocated the same
// register.  Does not modify flags (see comment above).
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
9481 
// CheckCastPP is a compile-time type assertion only: zero-size, emits
// no code.
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
9491 
// CastPP is a compile-time pointer-type refinement: zero-size, emits
// no code.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
9501 
// CastII is a compile-time int-range refinement: zero-size, zero-cost,
// emits no code.
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
9512 
9513 // ============================================================================
9514 // Atomic operation instructions
9515 //
9516 // Intel and SPARC both implement Ideal Node LoadPLocked and
9517 // Store{PIL}Conditional instructions using a normal load for the
9518 // LoadPLocked and a CAS for the Store{PIL}Conditional.
9519 //
9520 // The ideal code appears only to use LoadPLocked/StorePLocked as a
9521 // pair to lock object allocations from Eden space when not using
9522 // TLABs.
9523 //
9524 // There does not appear to be a Load{IL}Locked Ideal Node and the
9525 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
9526 // and to use StoreIConditional only for 32-bit and StoreLConditional
9527 // only for 64-bit.
9528 //
9529 // We implement LoadPLocked and StorePLocked instructions using,
9530 // respectively the AArch64 hw load-exclusive and store-conditional
9531 // instructions. Whereas we must implement each of
9532 // Store{IL}Conditional using a CAS which employs a pair of
9533 // instructions comprising a load-exclusive followed by a
9534 // store-conditional.
9535 
9536 
9537 // Locked-load (linked load) of the current heap-top
9538 // used when updating the eden heap top
9539 // implemented using ldaxr on AArch64
9540 
// Linked (exclusive) load of the heap top: ldaxr = load-acquire
// exclusive, pairing with the stlxr in storePConditional below.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
9553 
9554 // Conditional-store of the updated heap-top.
9555 // Used during allocation of the shared heap.
9556 // Sets flag (EQ) on success.
9557 // implemented using stlxr on AArch64.
9558 
// Store-conditional of the updated heap top: stlxr succeeds only if
// the exclusive monitor from the paired ldaxr is still held.  The
// encoding compares the stlxr status against zr so EQ signals success;
// oldval is carried only to satisfy the ideal node's shape.
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr) 
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
9578 
9579 
9580 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
9581 // when attempting to rebias a lock towards the current thread.  We
9582 // must use the acquire form of cmpxchg in order to guarantee acquire
9583 // semantics in this case.
// storeLConditional is used by PhaseMacroExpand::expand_lock_node
// when attempting to rebias a lock towards the current thread.  We
// must use the acquire form of cmpxchg in order to guarantee acquire
// semantics in this case.  Result is delivered in the flags: EQ on
// successful exchange.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) 
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
9599 
9600 // storeIConditional also has acquire semantics, for no better reason
9601 // than matching storeLConditional.  At the time of writing this
9602 // comment storeIConditional was not used anywhere by AArch64.
// storeIConditional also has acquire semantics, for no better reason
// than matching storeLConditional.  At the time of writing this
// comment storeIConditional was not used anywhere by AArch64.
// 32-bit variant of storeLConditional; EQ on success.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) 
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
9618 
// XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
// can't match them

// standard CompareAndSwapX when we are using barriers
// these have higher priority than the rules selected by a predicate

// CAS of a 32-bit int: res := 1 if the exchange succeeded, else 0
// (cset on the EQ flag left by the cmpxchgw encoding).
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS of a 64-bit long; same success-flag-to-register pattern as
// compareAndSwapI but using the doubleword cmpxchg encoding.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS of a full-width oop/pointer.  The plain (non-Shenandoah) form is
// only legal when the Shenandoah CAS barrier is off or the Binary input
// tested below is statically null.
// NOTE(review): confirm whether n->in(3)->in(1) denotes oldval or
// newval of the (Binary oldval newval) pair.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(!UseShenandoahGC || !ShenandoahCASBarrier || n->in(3)->in(1)->bottom_type() == TypePtr::NULL_PTR);
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9679 
// Shenandoah variant of the pointer CAS: routes through the Shenandoah
// CAS-barrier encoding, which needs an extra TEMP register; priced one
// VOLATILE_REF_COST higher than the plain rule to reflect that.
instruct compareAndSwapP_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{

  predicate(UseShenandoahGC && ShenandoahCASBarrier);
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(3 * VOLATILE_REF_COST);

  effect(TEMP tmp, KILL cr);

  format %{
    "cmpxchg_oop_shenandoah $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg_oop_shenandoah(mem, oldval, newval, tmp),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9698 
// CAS of a narrow (compressed) oop using the word-sized cmpxchgw.
// Excluded when the Shenandoah CAS barrier is active; the _shenandoah
// rule below handles that case.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(!UseShenandoahGC || !ShenandoahCASBarrier);
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9717 
// Shenandoah variant of the narrow-oop CAS.  Unlike the ptr variant it
// inlines the MacroAssembler call (word-sized operands, release-only
// ordering); oldval is first copied to tmp because the Shenandoah CAS
// helper clobbers its expected-value register.
instruct compareAndSwapN_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{

  predicate(UseShenandoahGC && ShenandoahCASBarrier);
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(3 * VOLATILE_REF_COST);

  effect(TEMP tmp, KILL cr);

  format %{
    "cmpxchg_narrow_oop_shenandoah $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval with temp $tmp"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
    __ cmpxchg_oop_shenandoah($mem$$Register, tmp, $newval$$Register, Assembler::word, /*acquire*/ false, /*release*/ true, /*weak*/ false);
    __ cset($res$$Register, Assembler::EQ);
  %}

  ins_pipe(pipe_slow);
%}
9740 
// alternative CompareAndSwapX when we are eliding barriers

// These Acq rules are selected (via needs_acquiring_load_exclusive and
// a lower ins_cost than the barrier-based rules above) when the CAS
// itself must supply acquire semantics.

// Acquiring CAS of a 32-bit int.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring CAS of a 64-bit long.
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring CAS of a full-width oop/pointer.  Same Shenandoah guard as
// the non-acquiring compareAndSwapP rule.
// NOTE(review): confirm whether n->in(3)->in(1) denotes oldval or
// newval of the (Binary oldval newval) pair.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && (!UseShenandoahGC || !ShenandoahCASBarrier || n->in(3)->in(1)->bottom_type() == TypePtr::NULL_PTR));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9799 
// Acquiring, Shenandoah-barrier variant of the pointer CAS; needs a
// TEMP register like the non-acquiring _shenandoah rule.
instruct compareAndSwapPAcq_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && UseShenandoahGC && ShenandoahCASBarrier);
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP tmp, KILL cr);

  format %{
    "cmpxchg_acq_oop_shenandoah $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq_oop_shenandoah(mem, oldval, newval, tmp),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring CAS of a narrow (compressed) oop, non-Shenandoah case.
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && (!UseShenandoahGC || !ShenandoahCASBarrier));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9837 
// Acquiring, Shenandoah-barrier variant of the narrow-oop CAS.
// Identical to compareAndSwapN_shenandoah except that the inlined
// cmpxchg_oop_shenandoah call requests acquire ordering; oldval is
// first copied to tmp because the helper clobbers its expected-value
// register.
instruct compareAndSwapNAcq_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && UseShenandoahGC && ShenandoahCASBarrier);
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(3 * VOLATILE_REF_COST);

  effect(TEMP tmp, KILL cr);

  format %{
    "cmpxchg_acq_narrow_oop_shenandoah $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval with temp $tmp"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
    __ cmpxchg_oop_shenandoah($mem$$Register, tmp, $newval$$Register, Assembler::word, /*acquire*/ true, /*release*/ true, /*weak*/ false);
    __ cset($res$$Register, Assembler::EQ);
  %}

  ins_pipe(pipe_slow);
%}
9860 
// Atomic exchange rules: prev := *mem; *mem := newv.  All four address
// through the base register of the indirect memory operand.

// Atomic exchange of a 32-bit int.
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange of a 64-bit long.
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange of a narrow (compressed) oop -- word-sized.
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange of a full-width pointer.
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  match(Set prev (GetAndSetP mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9896 
9897 
// Atomic fetch-and-add rules.  Each type has four variants: register
// or immediate increment, crossed with whether the old value is used.
// The _no_res forms (guarded by result_not_used()) discard the old
// value by passing noreg and carry a slightly lower cost so they are
// preferred when the result is dead.

// Long fetch-and-add, register increment: newval := old *mem.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Long add-only (result unused), register increment.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Long fetch-and-add, immediate increment.
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Long add-only (result unused), immediate increment.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int fetch-and-add, register increment (word-sized atomic_addw).
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int add-only (result unused), register increment.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int fetch-and-add, immediate increment.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int add-only (result unused), immediate increment.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9981 
9982 // ============================================================================
9983 // Conditional Move Instructions
9984 
9985 // n.b. we have identical rules for both a signed compare op (cmpOp)
9986 // and an unsigned compare op (cmpOpU). it would be nice if we could
9987 // define an op class which merged both inputs and use it to type the
9988 // argument to a single rule. unfortunatelyt his fails because the
9989 // opclass does not live up to the COND_INTER interface of its
9990 // component operands. When the generic code tries to negate the
9991 // operand it ends up running the generci Machoper::negate method
9992 // which throws a ShouldNotHappen. So, we have to provide two flavours
9993 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9994 
9995 instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
9996   match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));
9997 
9998   ins_cost(INSN_COST * 2);
9999   format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}
10000 
10001   ins_encode %{
10002     __ cselw(as_Register($dst$$reg),
10003              as_Register($src2$$reg),
10004              as_Register($src1$$reg),
10005              (Assembler::Condition)$cmp$$cmpcode);
10006   %}
10007 
10008   ins_pipe(icond_reg_reg);
10009 %}
10010 
10011 instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10012   match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));
10013 
10014   ins_cost(INSN_COST * 2);
10015   format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}
10016 
10017   ins_encode %{
10018     __ cselw(as_Register($dst$$reg),
10019              as_Register($src2$$reg),
10020              as_Register($src1$$reg),
10021              (Assembler::Condition)$cmp$$cmpcode);
10022   %}
10023 
10024   ins_pipe(icond_reg_reg);
10025 %}
10026 
// special cases where one arg is zero

// n.b. this is selected in preference to the rule above because it
// avoids loading constant 0 into a source register

// TODO
// we ought only to be able to cull one of these variants as the ideal
// transforms ought always to order the zero consistently (to left/right?)

// Int cmove with zero as the first (false) arm, signed compare:
// uses zr instead of materialising the constant.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Same as above but for an unsigned compare.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Int cmove with zero as the second (true) arm, signed compare.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Same as above but for an unsigned compare.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10099 
// special case for creating a boolean 0 or 1

// n.b. this is selected in preference to the rule above because it
// avoids loading constants 0 and 1 into a source register

// Boolean-producing cmove (arms are constants 1 and 0), signed
// compare: a single csincw of zr against zr yields 0 or 1.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// Same as above but for an unsigned compare.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
10142 
// Long cmove, signed compare; 64-bit csel, operands swapped as in the
// int rules: dst <- (cond ? src2 : src1).
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Long cmove, unsigned compare.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// Long cmove with zero as the second (true) arm, signed compare.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Same as above but for an unsigned compare.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Long cmove with zero as the first (false) arm, signed compare.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Same as above but for an unsigned compare.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10240 
// Pointer cmove, signed compare; 64-bit csel with swapped operands:
// dst <- (cond ? src2 : src1).
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Pointer cmove, unsigned compare.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// Pointer cmove with null as the second (true) arm, signed compare.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Same as above but for an unsigned compare.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Pointer cmove with null as the first (false) arm, signed compare.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Same as above but for an unsigned compare.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10338 
// Narrow-oop cmove, signed compare; word-sized cselw with swapped
// operands: dst <- (cond ? src2 : src1).
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10354 
// Narrow-oop cmove, unsigned compare.  The format comment previously
// said "signed" -- fixed to match every other cmpOpU rule in the file.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10370 
// special cases where one arg is zero

// Narrow-oop cmove with null as the second (true) arm, signed compare.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Same as above but for an unsigned compare.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Narrow-oop cmove with null as the first (false) arm, signed compare.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Same as above but for an unsigned compare.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10436 
// Float cmove, signed compare; fcsels with swapped operands:
// dst <- (cond ? src2 : src1).
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}

// Float cmove, unsigned compare.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10472 
// Double cmove, signed compare; fcseld with swapped operands:
// dst <- (cond ? src2 : src1).  Format comment previously said
// "cmove float" -- fixed to "cmove double" for this fcseld rule.
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10490 
// Double cmove, unsigned compare.  Format comment previously said
// "cmove float" -- fixed to "cmove double" for this fcseld rule.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10508 
10509 // ============================================================================
10510 // Arithmetic Instructions
10511 //
10512 
10513 // Integer Addition
10514 
10515 // TODO
10516 // these currently employ operations which do not set CR and hence are
10517 // not flagged as killing CR but we would like to isolate the cases
10518 // where we want to set flags from those where we don't. need to work
10519 // out how to do that.
10520 
// Integer addition of two registers (32-bit addw).
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Integer addition with an add/sub-encodable immediate.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// As above but the int input comes from the low word of a long (ConvL2I).
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10563 
// Pointer Addition
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer plus a sign-extended int offset: the ConvI2L is folded into the
// add's sxtw extend.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer plus a scaled long index: the shift is folded into a single lea.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Pointer plus a scaled, sign-extended int index (ConvI2L then shift),
// folded into a single lea with an sxtw-extended index.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Left shift of a sign-extended int, done in one sbfiz.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    // field width is capped at 32 because the source carries only 32
    // significant bits (it is a ConvI2L input)
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}

// Pointer Immediate Addition
// n.b. this needs to be more expensive than using an indirect memory
// operand
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10656 
// Long Addition
// 64-bit register-register add.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10673 
// Long Immediate Addition. No constant pool entries required.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  // (the shared aarch64_enc_addsub_imm encoding serves both)
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10688 
// Integer Subtraction
// 32-bit register-register subtract.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10719 
// Long Subtraction
// 64-bit register-register subtract.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10736 
// Long Immediate Subtraction. No constant pool entries required.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // fixed: the format string was missing the separator after the mnemonic
  // ("sub$dst..."); now spaced like the other sub rules
  format %{ "sub  $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10751 
// Integer Negation (special case for sub)

// 0 - src is matched to a single negw.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10767 
// Long Negation

// 0 - src is matched to a single neg.
// fixed: src was declared iRegIorL2I (the int / ConvL2I operand class)
// but the match rule (SubL zero src) consumes a long value, so the
// operand class must be iRegL, as in the other long rules.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10783 
// Integer Multiply

// 32-bit register-register multiply.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// 32x32->64 bit multiply: a long product of two sign-extended ints maps
// onto a single smull.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10815 
// Long Multiply

// 64-bit register-register multiply (low 64 bits of the product).
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10832 
// High 64 bits of a signed 64x64 multiply (smulh).
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  // fixed: dropped the stray trailing comma after $src2 in the format string
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10848 
// Combined Integer Multiply & Add/Sub

// src3 + src1 * src2 folded into one 32-bit multiply-add.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // fixed: format said "madd" but the 32-bit form maddw is emitted
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10866 
// src3 - src1 * src2 folded into one 32-bit multiply-subtract.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // fixed: format said "msub" but the 32-bit form msubw is emitted
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10882 
// Combined Long Multiply & Add/Sub

// src3 + src1 * src2 folded into one 64-bit multiply-add.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// src3 - src1 * src2 folded into one 64-bit multiply-subtract.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10916 
// Integer Divide

instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}

// (x >> 31) >>> 31 extracts just the sign bit of x; a single lsrw does it.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// x + sign-bit(x), folded into one shifted-register addw
// (presumably the rounding fix-up for signed divide by 2 — name suggests so).
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
10952 
// Long Divide

instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}

// (x >> 63) >>> 63 extracts just the sign bit of x; a single lsr does it.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
10974 
// x + sign-bit(x), folded into one shifted-register add (long form of
// div2Round above).
instruct div2RoundL(iRegLNoSp dst, iRegL src, immL_63 div1, immL_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  // fixed: the format omitted the LSR shift; now matches div2Round and the
  // shifted-register add actually emitted below
  format %{ "add $dst, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
10988 
// Integer Remainder

instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  // fixed: the msubw line had a stray '(' splicing into the operand list
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
11001 
// Long Remainder

instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // fixed: the msub line had a stray '(' splicing into the operand list;
  // also use "\n\t" as in modI so the two-line disassembly aligns
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
11014 
// Integer Shifts

// Shift Left Register
// Variable shift count taken from a register.
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    // constant count masked to 0..31 (any immI is accepted by the matcher)
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    // constant count masked to 0..31
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    // constant count masked to 0..31
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11112 
// Combined Int Mask and Right Shift (using UBFM)
// TODO

// Long Shifts

// Shift Left Register
// Variable shift count taken from a register.
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    // constant count masked to 0..63 (any immI is accepted by the matcher)
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    // constant count masked to 0..63
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// A special-case pattern for card table stores.
// Logical right shift applied to a pointer reinterpreted as a long.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    // constant count masked to 0..63
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11229 
11230 // BEGIN This section of the file is automatically generated. Do not edit --------------
11231 
11232 instruct regL_not_reg(iRegLNoSp dst,
11233                          iRegL src1, immL_M1 m1,
11234                          rFlagsReg cr) %{
11235   match(Set dst (XorL src1 m1));
11236   ins_cost(INSN_COST);
11237   format %{ "eon  $dst, $src1, zr" %}
11238 
11239   ins_encode %{
11240     __ eon(as_Register($dst$$reg),
11241               as_Register($src1$$reg),
11242               zr,
11243               Assembler::LSL, 0);
11244   %}
11245 
11246   ins_pipe(ialu_reg);
11247 %}
11248 instruct regI_not_reg(iRegINoSp dst,
11249                          iRegIorL2I src1, immI_M1 m1,
11250                          rFlagsReg cr) %{
11251   match(Set dst (XorI src1 m1));
11252   ins_cost(INSN_COST);
11253   format %{ "eonw  $dst, $src1, zr" %}
11254 
11255   ins_encode %{
11256     __ eonw(as_Register($dst$$reg),
11257               as_Register($src1$$reg),
11258               zr,
11259               Assembler::LSL, 0);
11260   %}
11261 
11262   ins_pipe(ialu_reg);
11263 %}
11264 
11265 instruct AndI_reg_not_reg(iRegINoSp dst,
11266                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
11267                          rFlagsReg cr) %{
11268   match(Set dst (AndI src1 (XorI src2 m1)));
11269   ins_cost(INSN_COST);
11270   format %{ "bicw  $dst, $src1, $src2" %}
11271 
11272   ins_encode %{
11273     __ bicw(as_Register($dst$$reg),
11274               as_Register($src1$$reg),
11275               as_Register($src2$$reg),
11276               Assembler::LSL, 0);
11277   %}
11278 
11279   ins_pipe(ialu_reg_reg);
11280 %}
11281 
11282 instruct AndL_reg_not_reg(iRegLNoSp dst,
11283                          iRegL src1, iRegL src2, immL_M1 m1,
11284                          rFlagsReg cr) %{
11285   match(Set dst (AndL src1 (XorL src2 m1)));
11286   ins_cost(INSN_COST);
11287   format %{ "bic  $dst, $src1, $src2" %}
11288 
11289   ins_encode %{
11290     __ bic(as_Register($dst$$reg),
11291               as_Register($src1$$reg),
11292               as_Register($src2$$reg),
11293               Assembler::LSL, 0);
11294   %}
11295 
11296   ins_pipe(ialu_reg_reg);
11297 %}
11298 
11299 instruct OrI_reg_not_reg(iRegINoSp dst,
11300                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
11301                          rFlagsReg cr) %{
11302   match(Set dst (OrI src1 (XorI src2 m1)));
11303   ins_cost(INSN_COST);
11304   format %{ "ornw  $dst, $src1, $src2" %}
11305 
11306   ins_encode %{
11307     __ ornw(as_Register($dst$$reg),
11308               as_Register($src1$$reg),
11309               as_Register($src2$$reg),
11310               Assembler::LSL, 0);
11311   %}
11312 
11313   ins_pipe(ialu_reg_reg);
11314 %}
11315 
11316 instruct OrL_reg_not_reg(iRegLNoSp dst,
11317                          iRegL src1, iRegL src2, immL_M1 m1,
11318                          rFlagsReg cr) %{
11319   match(Set dst (OrL src1 (XorL src2 m1)));
11320   ins_cost(INSN_COST);
11321   format %{ "orn  $dst, $src1, $src2" %}
11322 
11323   ins_encode %{
11324     __ orn(as_Register($dst$$reg),
11325               as_Register($src1$$reg),
11326               as_Register($src2$$reg),
11327               Assembler::LSL, 0);
11328   %}
11329 
11330   ins_pipe(ialu_reg_reg);
11331 %}
11332 
11333 instruct XorI_reg_not_reg(iRegINoSp dst,
11334                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
11335                          rFlagsReg cr) %{
11336   match(Set dst (XorI m1 (XorI src2 src1)));
11337   ins_cost(INSN_COST);
11338   format %{ "eonw  $dst, $src1, $src2" %}
11339 
11340   ins_encode %{
11341     __ eonw(as_Register($dst$$reg),
11342               as_Register($src1$$reg),
11343               as_Register($src2$$reg),
11344               Assembler::LSL, 0);
11345   %}
11346 
11347   ins_pipe(ialu_reg_reg);
11348 %}
11349 
11350 instruct XorL_reg_not_reg(iRegLNoSp dst,
11351                          iRegL src1, iRegL src2, immL_M1 m1,
11352                          rFlagsReg cr) %{
11353   match(Set dst (XorL m1 (XorL src2 src1)));
11354   ins_cost(INSN_COST);
11355   format %{ "eon  $dst, $src1, $src2" %}
11356 
11357   ins_encode %{
11358     __ eon(as_Register($dst$$reg),
11359               as_Register($src1$$reg),
11360               as_Register($src2$$reg),
11361               Assembler::LSL, 0);
11362   %}
11363 
11364   ins_pipe(ialu_reg_reg);
11365 %}
11366 
11367 instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
11368                          iRegIorL2I src1, iRegIorL2I src2,
11369                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11370   match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
11371   ins_cost(1.9 * INSN_COST);
11372   format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}
11373 
11374   ins_encode %{
11375     __ bicw(as_Register($dst$$reg),
11376               as_Register($src1$$reg),
11377               as_Register($src2$$reg),
11378               Assembler::LSR,
11379               $src3$$constant & 0x1f);
11380   %}
11381 
11382   ins_pipe(ialu_reg_reg_shift);
11383 %}
11384 
11385 instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
11386                          iRegL src1, iRegL src2,
11387                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11388   match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
11389   ins_cost(1.9 * INSN_COST);
11390   format %{ "bic  $dst, $src1, $src2, LSR $src3" %}
11391 
11392   ins_encode %{
11393     __ bic(as_Register($dst$$reg),
11394               as_Register($src1$$reg),
11395               as_Register($src2$$reg),
11396               Assembler::LSR,
11397               $src3$$constant & 0x3f);
11398   %}
11399 
11400   ins_pipe(ialu_reg_reg_shift);
11401 %}
11402 
11403 instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
11404                          iRegIorL2I src1, iRegIorL2I src2,
11405                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11406   match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
11407   ins_cost(1.9 * INSN_COST);
11408   format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}
11409 
11410   ins_encode %{
11411     __ bicw(as_Register($dst$$reg),
11412               as_Register($src1$$reg),
11413               as_Register($src2$$reg),
11414               Assembler::ASR,
11415               $src3$$constant & 0x1f);
11416   %}
11417 
11418   ins_pipe(ialu_reg_reg_shift);
11419 %}
11420 
11421 instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
11422                          iRegL src1, iRegL src2,
11423                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11424   match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
11425   ins_cost(1.9 * INSN_COST);
11426   format %{ "bic  $dst, $src1, $src2, ASR $src3" %}
11427 
11428   ins_encode %{
11429     __ bic(as_Register($dst$$reg),
11430               as_Register($src1$$reg),
11431               as_Register($src2$$reg),
11432               Assembler::ASR,
11433               $src3$$constant & 0x3f);
11434   %}
11435 
11436   ins_pipe(ialu_reg_reg_shift);
11437 %}
11438 
11439 instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
11440                          iRegIorL2I src1, iRegIorL2I src2,
11441                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11442   match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
11443   ins_cost(1.9 * INSN_COST);
11444   format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}
11445 
11446   ins_encode %{
11447     __ bicw(as_Register($dst$$reg),
11448               as_Register($src1$$reg),
11449               as_Register($src2$$reg),
11450               Assembler::LSL,
11451               $src3$$constant & 0x1f);
11452   %}
11453 
11454   ins_pipe(ialu_reg_reg_shift);
11455 %}
11456 
11457 instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
11458                          iRegL src1, iRegL src2,
11459                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11460   match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
11461   ins_cost(1.9 * INSN_COST);
11462   format %{ "bic  $dst, $src1, $src2, LSL $src3" %}
11463 
11464   ins_encode %{
11465     __ bic(as_Register($dst$$reg),
11466               as_Register($src1$$reg),
11467               as_Register($src2$$reg),
11468               Assembler::LSL,
11469               $src3$$constant & 0x3f);
11470   %}
11471 
11472   ins_pipe(ialu_reg_reg_shift);
11473 %}
11474 
11475 instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
11476                          iRegIorL2I src1, iRegIorL2I src2,
11477                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11478   match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
11479   ins_cost(1.9 * INSN_COST);
11480   format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}
11481 
11482   ins_encode %{
11483     __ eonw(as_Register($dst$$reg),
11484               as_Register($src1$$reg),
11485               as_Register($src2$$reg),
11486               Assembler::LSR,
11487               $src3$$constant & 0x1f);
11488   %}
11489 
11490   ins_pipe(ialu_reg_reg_shift);
11491 %}
11492 
11493 instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
11494                          iRegL src1, iRegL src2,
11495                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11496   match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
11497   ins_cost(1.9 * INSN_COST);
11498   format %{ "eon  $dst, $src1, $src2, LSR $src3" %}
11499 
11500   ins_encode %{
11501     __ eon(as_Register($dst$$reg),
11502               as_Register($src1$$reg),
11503               as_Register($src2$$reg),
11504               Assembler::LSR,
11505               $src3$$constant & 0x3f);
11506   %}
11507 
11508   ins_pipe(ialu_reg_reg_shift);
11509 %}
11510 
// dst = src1 ^ ~(src2 >> src3) (arithmetic shift), emitted as one
// EONW with an ASR shifted-register operand.  src4 (immI_M1) marks
// the outer XorI as a NOT.  Shift count masked to 0..31 (& 0x1f).
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11528 
// dst = src1 ^ ~(src2 >> src3) (64-bit arithmetic shift), emitted as
// one EON with an ASR shifted-register operand.  src4 (immL_M1) marks
// the outer XorL as a NOT.  Shift count masked to 0..63 (& 0x3f).
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11546 
// dst = src1 ^ ~(src2 << src3), emitted as one EONW with an LSL
// shifted-register operand.  src4 (immI_M1) marks the outer XorI as
// a NOT.  Shift count masked to 0..31 (& 0x1f).
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11564 
// dst = src1 ^ ~(src2 << src3) (64-bit), emitted as one EON with an
// LSL shifted-register operand.  src4 (immL_M1) marks the outer XorL
// as a NOT.  Shift count masked to 0..63 (& 0x3f).
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11582 
// dst = src1 | ~(src2 >>> src3), emitted as one ORNW with an LSR
// shifted-register operand.  src4 (immI_M1) turns the inner XorI
// into a bitwise NOT.  Shift count masked to 0..31 (& 0x1f).
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11600 
// dst = src1 | ~(src2 >>> src3) (64-bit), emitted as one ORN with an
// LSR shifted-register operand.  src4 (immL_M1) turns the inner XorL
// into a NOT.  Shift count masked to 0..63 (& 0x3f).
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11618 
// dst = src1 | ~(src2 >> src3) (arithmetic shift), emitted as one
// ORNW with an ASR shifted-register operand.  src4 (immI_M1) turns
// the inner XorI into a NOT.  Shift count masked to 0..31 (& 0x1f).
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11636 
// dst = src1 | ~(src2 >> src3) (64-bit arithmetic shift), emitted as
// one ORN with an ASR shifted-register operand.  src4 (immL_M1) turns
// the inner XorL into a NOT.  Shift count masked to 0..63 (& 0x3f).
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11654 
// dst = src1 | ~(src2 << src3), emitted as one ORNW with an LSL
// shifted-register operand.  src4 (immI_M1) turns the inner XorI
// into a NOT.  Shift count masked to 0..31 (& 0x1f).
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11672 
// dst = src1 | ~(src2 << src3) (64-bit), emitted as one ORN with an
// LSL shifted-register operand.  src4 (immL_M1) turns the inner XorL
// into a NOT.  Shift count masked to 0..63 (& 0x3f).
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11690 
// dst = src1 & (src2 >>> src3): the unsigned shift is folded into the
// ANDW shifted-register operand.  Shift count masked to 0..31 (& 0x1f).
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11709 
// dst = src1 & (src2 >>> src3) (64-bit): the unsigned shift is folded
// into the AND shifted-register operand ("andr" is the macro-assembler
// name for AND).  Shift count masked to 0..63 (& 0x3f).
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11728 
// dst = src1 & (src2 >> src3): the arithmetic shift is folded into the
// ANDW shifted-register operand.  Shift count masked to 0..31 (& 0x1f).
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11747 
// dst = src1 & (src2 >> src3) (64-bit arithmetic shift), folded into
// the AND shifted-register operand.  Shift count masked to 0..63.
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11766 
// dst = src1 & (src2 << src3): the left shift is folded into the ANDW
// shifted-register operand.  Shift count masked to 0..31 (& 0x1f).
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11785 
// dst = src1 & (src2 << src3) (64-bit): the left shift is folded into
// the AND shifted-register operand.  Shift count masked to 0..63.
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11804 
// dst = src1 ^ (src2 >>> src3): the unsigned shift is folded into the
// EORW shifted-register operand.  Shift count masked to 0..31 (& 0x1f).
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11823 
// dst = src1 ^ (src2 >>> src3) (64-bit): the unsigned shift is folded
// into the EOR shifted-register operand.  Shift count masked to 0..63.
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11842 
// dst = src1 ^ (src2 >> src3): the arithmetic shift is folded into the
// EORW shifted-register operand.  Shift count masked to 0..31 (& 0x1f).
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11861 
// dst = src1 ^ (src2 >> src3) (64-bit arithmetic shift), folded into
// the EOR shifted-register operand.  Shift count masked to 0..63.
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11880 
// dst = src1 ^ (src2 << src3): the left shift is folded into the EORW
// shifted-register operand.  Shift count masked to 0..31 (& 0x1f).
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11899 
// dst = src1 ^ (src2 << src3) (64-bit): the left shift is folded into
// the EOR shifted-register operand.  Shift count masked to 0..63.
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11918 
// dst = src1 | (src2 >>> src3): the unsigned shift is folded into the
// ORRW shifted-register operand.  Shift count masked to 0..31 (& 0x1f).
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11937 
// dst = src1 | (src2 >>> src3) (64-bit): the unsigned shift is folded
// into the ORR shifted-register operand.  Shift count masked to 0..63.
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11956 
// dst = src1 | (src2 >> src3): the arithmetic shift is folded into the
// ORRW shifted-register operand.  Shift count masked to 0..31 (& 0x1f).
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11975 
// dst = src1 | (src2 >> src3) (64-bit arithmetic shift), folded into
// the ORR shifted-register operand.  Shift count masked to 0..63.
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11994 
// dst = src1 | (src2 << src3): the left shift is folded into the ORRW
// shifted-register operand.  Shift count masked to 0..31 (& 0x1f).
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12013 
// dst = src1 | (src2 << src3) (64-bit): the left shift is folded into
// the ORR shifted-register operand.  Shift count masked to 0..63.
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12032 
// dst = src1 + (src2 >>> src3): the unsigned shift is folded into the
// ADDW shifted-register operand.  Shift count masked to 0..31 (& 0x1f).
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12051 
// dst = src1 + (src2 >>> src3) (64-bit): the unsigned shift is folded
// into the ADD shifted-register operand.  Shift count masked to 0..63.
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12070 
// dst = src1 + (src2 >> src3): the arithmetic shift is folded into the
// ADDW shifted-register operand.  Shift count masked to 0..31 (& 0x1f).
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12089 
// dst = src1 + (src2 >> src3) (64-bit arithmetic shift), folded into
// the ADD shifted-register operand.  Shift count masked to 0..63.
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12108 
// dst = src1 + (src2 << src3): the left shift is folded into the ADDW
// shifted-register operand.  Shift count masked to 0..31 (& 0x1f).
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12127 
// dst = src1 + (src2 << src3) (64-bit): the left shift is folded into
// the ADD shifted-register operand.  Shift count masked to 0..63.
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12146 
// dst = src1 - (src2 >>> src3): the unsigned shift is folded into the
// SUBW shifted-register operand.  Shift count masked to 0..31 (& 0x1f).
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12165 
// dst = src1 - (src2 >>> src3) (64-bit): the unsigned shift is folded
// into the SUB shifted-register operand.  Shift count masked to 0..63.
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12184 
// dst = src1 - (src2 >> src3): the arithmetic shift is folded into the
// SUBW shifted-register operand.  Shift count masked to 0..31 (& 0x1f).
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12203 
// dst = src1 - (src2 >> src3) (64-bit arithmetic shift), folded into
// the SUB shifted-register operand.  Shift count masked to 0..63.
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12222 
// dst = src1 - (src2 << src3): the left shift is folded into the SUBW
// shifted-register operand.  Shift count masked to 0..31 (& 0x1f).
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12241 
// dst = src1 - (src2 << src3) (64-bit): the left shift is folded into
// the SUB shifted-register operand.  Shift count masked to 0..63.
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12260 
12261 
12262 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// (src << lshift) >> rshift collapses to one signed bitfield move:
// SBFM dst, src, r = (rshift - lshift) & 63, s = 63 - lshift.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;  // highest source bit that survives the left shift
    int r = (rshift - lshift) & 63;  // net right-rotate placing the field
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12285 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of sbfmL: (src << lshift) >> rshift becomes
// SBFMW dst, src, r = (rshift - lshift) & 31, s = 31 - lshift.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;  // highest source bit that survives the left shift
    int r = (rshift - lshift) & 31;  // net right-rotate placing the field
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12308 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Unsigned counterpart of sbfmL: (src << lshift) >>> rshift becomes
// UBFM dst, src, r = (rshift - lshift) & 63, s = 63 - lshift.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;  // highest source bit that survives the left shift
    int r = (rshift - lshift) & 63;  // net right-rotate placing the field
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12331 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit unsigned variant: (src << lshift) >>> rshift becomes
// UBFMW dst, src, r = (rshift - lshift) & 31, s = 31 - lshift.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;  // highest source bit that survives the left shift
    int r = (rshift - lshift) & 31;  // net right-rotate placing the field
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12354 // Bitfield extract with shift & mask
12355 
// Bitfield extract: dst = (src >>> rshift) & mask, where mask is a
// contiguous low-order bit mask (guaranteed by immI_bitmask), folded
// into a single UBFXW of width log2(mask + 1) starting at bit rshift.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  // Show the shift amount too: the old format omitted $rshift, making
  // PrintAssembly output look like a plain mask operation.
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);  // mask+1 is a power of two (immI_bitmask)
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit bitfield extract: dst = (src >>> rshift) & mask, where mask is
// a contiguous low-order bit mask (guaranteed by immL_bitmask), folded
// into a single UBFX of width log2(mask + 1) starting at bit rshift.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  // Show the shift amount too: the old format omitted $rshift, making
  // PrintAssembly output look like a plain mask operation.
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);  // mask+1 is a power of two (immL_bitmask)
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12386 
// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// dst = (long)((src >>> rshift) & mask): the int extract zero-fills
// the upper 32 bits, so the 64-bit UBFX result equals the ConvI2L.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  // Show the shift amount too: the old format omitted $rshift, making
  // PrintAssembly output look like a plain mask operation.
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);  // mask+1 is a power of two (immI_bitmask)
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12404 
// Rotations

// Long funnel/rotate: dst = (src1 << lshift) | (src2 >>> rshift) when
// lshift + rshift is a multiple of 64, implemented as a single EXTR.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12421 
// Int funnel/rotate: dst = (src1 << lshift) | (src2 >>> rshift) when
// lshift + rshift is a multiple of 32, implemented as a single EXTRW.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  // This pattern emits the 32-bit extrw, so print "extrw" (the old
  // format said "extr", misrepresenting the emitted instruction).
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12436 
12437 instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
12438 %{
12439   match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
12440   predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));
12441 
12442   ins_cost(INSN_COST);
12443   format %{ "extr $dst, $src1, $src2, #$rshift" %}
12444 
12445   ins_encode %{
12446     __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
12447             $rshift$$constant & 63);
12448   %}
12449   ins_pipe(ialu_reg_reg_extr);
12450 %}
12451 
12452 instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
12453 %{
12454   match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
12455   predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));
12456 
12457   ins_cost(INSN_COST);
12458   format %{ "extr $dst, $src1, $src2, #$rshift" %}
12459 
12460   ins_encode %{
12461     __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
12462             $rshift$$constant & 31);
12463   %}
12464   ins_pipe(ialu_reg_reg_extr);
12465 %}
12466 
12467 
// rol expander
//
// Variable rotate-left of a long.  AArch64 has no rol instruction, so
// this emits a negate (subw from zr) followed by rorv: rotating right
// by (-shift) mod 64 equals rotating left by shift (rorv presumably
// uses only the low bits of the shift register -- per the ISA).
// Clobbers rscratch1.
instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// rol expander
//
// 32-bit variant of rolL_rReg (uses rorvw).  Clobbers rscratch1.
instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// Matcher rules for the Java rotate-left idiom
// (x << s) | (x >>> (64 - s)); expands into the rolL_rReg pseudo.
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom written as (x << s) | (x >>> (0 - s)) -- equivalent
// because long shift amounts are taken mod 64.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-left idiom (x << s) | (x >>> (32 - s)).
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-left idiom (x << s) | (x >>> (0 - s)).
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12535 
// ror expander
//
// Variable rotate-right of a long; maps directly onto rorv, so no
// scratch register is needed (unlike the rol expanders above).
instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander
//
// 32-bit variant of rorL_rReg (uses rorvw).
instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// Matcher rules for the Java rotate-right idiom
// (x >>> s) | (x << (64 - s)); expands into the rorL_rReg pseudo.
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom written as (x >>> s) | (x << (0 - s)) -- equivalent
// because long shift amounts are taken mod 64.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-right idiom (x >>> s) | (x << (32 - s)).
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-right idiom (x >>> s) | (x << (0 - s)).
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12601 
// Add/subtract (extended)

// Add a sign-extended int to a long using the extended-register form
// of add (sxtw), folding the ConvI2L into the add.
// NOTE(review): the trailing ';' after '%}' below looks accidental but
// is tolerated by adlc; left untouched.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

// Subtract a sign-extended int from a long using the extended-register
// form of sub (sxtw), folding the ConvI2L into the sub.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
12629 
12630 
// (src2 << 16) >> 16 is a sign-extension of the low short; fold it
// into the add via the sxth extended-register form.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// (src2 << 24) >> 24 sign-extends the low byte: add ... sxtb.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// (src2 << 24) >>> 24 (unsigned right shift) zero-extends the low
// byte: add ... uxtb.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long form: (src2 << 48) >> 48 sign-extends the low short.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long form: (src2 << 32) >> 32 sign-extends the low word.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long form: (src2 << 56) >> 56 sign-extends the low byte.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long form: (src2 << 56) >>> 56 zero-extends the low byte.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12721 
12722 
// src2 & 0xFF is a zero-extension of the low byte; fold it into the
// add via the uxtb extended-register form.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// src2 & 0xFFFF zero-extends the low short: addw ... uxth.
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long form: src2 & 0xFF folded as uxtb.
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long form: src2 & 0xFFFF folded as uxth.
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long form: src2 & 0xFFFFFFFF folded as uxtw.
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12787 
// Subtract variants of the mask-fold rules above: src2 & 0xFF is a
// zero-extension of the low byte, folded into subw ... uxtb.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// src2 & 0xFFFF zero-extends the low short: subw ... uxth.
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long form: src2 & 0xFF folded as uxtb.
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long form: src2 & 0xFFFF folded as uxth.
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long form: src2 & 0xFFFFFFFF folded as uxtw.
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12852 
12853 // END This section of the file is automatically generated. Do not edit --------------
12854 
12855 // ============================================================================
12856 // Floating Point Arithmetic Instructions
12857 
// Single-precision FP add.
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP add.
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Single-precision FP subtract.
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP subtract.
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Single-precision FP multiply (slightly costlier than add/sub).
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP multiply.
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12947 
// We cannot use these fused mul w add/sub ops because they don't
// produce the same result as the equivalent separated ops
// (essentially they don't round the intermediate result). That's a
// shame. Leaving them here in case we can identify cases where it is
// legitimate to use them.
12953 
12954 
12955 // instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12956 //   match(Set dst (AddF (MulF src1 src2) src3));
12957 
12958 //   format %{ "fmadds   $dst, $src1, $src2, $src3" %}
12959 
12960 //   ins_encode %{
12961 //     __ fmadds(as_FloatRegister($dst$$reg),
12962 //              as_FloatRegister($src1$$reg),
12963 //              as_FloatRegister($src2$$reg),
12964 //              as_FloatRegister($src3$$reg));
12965 //   %}
12966 
12967 //   ins_pipe(pipe_class_default);
12968 // %}
12969 
12970 // instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12971 //   match(Set dst (AddD (MulD src1 src2) src3));
12972 
12973 //   format %{ "fmaddd   $dst, $src1, $src2, $src3" %}
12974 
12975 //   ins_encode %{
12976 //     __ fmaddd(as_FloatRegister($dst$$reg),
12977 //              as_FloatRegister($src1$$reg),
12978 //              as_FloatRegister($src2$$reg),
12979 //              as_FloatRegister($src3$$reg));
12980 //   %}
12981 
12982 //   ins_pipe(pipe_class_default);
12983 // %}
12984 
12985 // instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12986 //   match(Set dst (AddF (MulF (NegF src1) src2) src3));
12987 //   match(Set dst (AddF (NegF (MulF src1 src2)) src3));
12988 
12989 //   format %{ "fmsubs   $dst, $src1, $src2, $src3" %}
12990 
12991 //   ins_encode %{
12992 //     __ fmsubs(as_FloatRegister($dst$$reg),
12993 //               as_FloatRegister($src1$$reg),
12994 //               as_FloatRegister($src2$$reg),
12995 //              as_FloatRegister($src3$$reg));
12996 //   %}
12997 
12998 //   ins_pipe(pipe_class_default);
12999 // %}
13000 
13001 // instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
13002 //   match(Set dst (AddD (MulD (NegD src1) src2) src3));
13003 //   match(Set dst (AddD (NegD (MulD src1 src2)) src3));
13004 
13005 //   format %{ "fmsubd   $dst, $src1, $src2, $src3" %}
13006 
13007 //   ins_encode %{
13008 //     __ fmsubd(as_FloatRegister($dst$$reg),
13009 //               as_FloatRegister($src1$$reg),
13010 //               as_FloatRegister($src2$$reg),
13011 //               as_FloatRegister($src3$$reg));
13012 //   %}
13013 
13014 //   ins_pipe(pipe_class_default);
13015 // %}
13016 
13017 // instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
13018 //   match(Set dst (SubF (MulF (NegF src1) src2) src3));
13019 //   match(Set dst (SubF (NegF (MulF src1 src2)) src3));
13020 
13021 //   format %{ "fnmadds  $dst, $src1, $src2, $src3" %}
13022 
13023 //   ins_encode %{
13024 //     __ fnmadds(as_FloatRegister($dst$$reg),
13025 //                as_FloatRegister($src1$$reg),
13026 //                as_FloatRegister($src2$$reg),
13027 //                as_FloatRegister($src3$$reg));
13028 //   %}
13029 
13030 //   ins_pipe(pipe_class_default);
13031 // %}
13032 
13033 // instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
13034 //   match(Set dst (SubD (MulD (NegD src1) src2) src3));
13035 //   match(Set dst (SubD (NegD (MulD src1 src2)) src3));
13036 
13037 //   format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}
13038 
13039 //   ins_encode %{
13040 //     __ fnmaddd(as_FloatRegister($dst$$reg),
13041 //                as_FloatRegister($src1$$reg),
13042 //                as_FloatRegister($src2$$reg),
13043 //                as_FloatRegister($src3$$reg));
13044 //   %}
13045 
13046 //   ins_pipe(pipe_class_default);
13047 // %}
13048 
13049 // instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
13050 //   match(Set dst (SubF (MulF src1 src2) src3));
13051 
13052 //   format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}
13053 
13054 //   ins_encode %{
13055 //     __ fnmsubs(as_FloatRegister($dst$$reg),
13056 //                as_FloatRegister($src1$$reg),
13057 //                as_FloatRegister($src2$$reg),
13058 //                as_FloatRegister($src3$$reg));
13059 //   %}
13060 
13061 //   ins_pipe(pipe_class_default);
13062 // %}
13063 
13064 // instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
13065 //   match(Set dst (SubD (MulD src1 src2) src3));
13066 
13067 //   format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}
13068 
13069 //   ins_encode %{
13070 //   // n.b. insn name should be fnmsubd
13071 //     __ fnmsub(as_FloatRegister($dst$$reg),
13072 //                as_FloatRegister($src1$$reg),
13073 //                as_FloatRegister($src2$$reg),
13074 //                as_FloatRegister($src3$$reg));
13075 //   %}
13076 
13077 //   ins_pipe(pipe_class_default);
13078 // %}
13079 
13080 
// Single-precision FP divide.  Much higher cost than add/mul to steer
// the optimizer away from speculative divides.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// Double-precision FP divide (costlier still).
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
13110 
// Single-precision FP negate.
// NOTE(review): format string prints "fneg" while the double variant
// prints "fnegd"; presumably this was meant to be "fnegs" -- debug
// text only, left unchanged.
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  format %{ "fneg   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double-precision FP negate.
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}

// Single-precision FP absolute value.
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double-precision FP absolute value.
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13164 
// Double-precision FP square root.
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Was fp_div_s: a double-precision sqrt belongs on the
  // double-precision divide/sqrt pipeline (cf. divD_reg_reg, which
  // uses fp_div_d).  The s/d pipes were transposed with sqrtF_reg.
  ins_pipe(fp_div_d);
%}
13177 
// Single-precision FP square root.  Java only has Math.sqrt(double),
// so a float sqrt appears as (float)sqrt((double)f); fsqrts computes
// the same correctly-rounded result directly -- presumably relying on
// the standard single<->double sqrt rounding equivalence.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Was fp_div_d: a single-precision sqrt belongs on the
  // single-precision divide/sqrt pipeline (cf. divF_reg_reg, which
  // uses fp_div_s).  The s/d pipes were transposed with sqrtD_reg.
  ins_pipe(fp_div_s);
%}
13190 
13191 // ============================================================================
13192 // Logical Instructions
13193 
13194 // Integer Logical Instructions
13195 
13196 // And Instructions
13197 
13198 
// Bitwise AND of two int registers (non-flag-setting andw).
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13213 
// Bitwise AND of an int register with a logical immediate (immILog
// guarantees the value is encodable as an A64 bitmask immediate).
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Was "andsw": the encoding emits andw, which does not set flags,
  // so print the mnemonic actually generated.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13228 
// Or Instructions

// Bitwise OR of two int registers.
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Bitwise OR of an int register with a logical immediate.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// Bitwise XOR of two int registers.
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Bitwise XOR of an int register with a logical immediate.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13292 
13293 // Long Logical Instructions
13294 // TODO
13295 
// Bitwise AND of two long registers.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Was "\t# int": these are 64-bit (long) operations.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Bitwise AND of a long register with a logical immediate (immLLog
// guarantees the value is encodable as an A64 bitmask immediate).
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Or Instructions

// Bitwise OR of two long registers.
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Bitwise OR of a long register with a logical immediate.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// Bitwise XOR of two long registers.
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Bitwise XOR of a long register with a logical immediate.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13389 
// Sign-extend int to long: sbfm with imms=31 is the sxtw alias.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// Zero-extend int to long: ((long)i) & 0xFFFFFFFFL done in one ubfm
// (the uxtw/mov-w effect) instead of sxtw followed by an and.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
13415 
13416 instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
13417   match(Set dst (ConvL2I src));
13418 
13419   ins_cost(INSN_COST);
13420   format %{ "movw  $dst, $src \t// l2i" %}
13421 
13422   ins_encode %{
13423     __ movw(as_Register($dst$$reg), as_Register($src$$reg));
13424   %}
13425 
13426   ins_pipe(ialu_reg);
13427 %}
13428 
// Convert int to boolean: dst = (src != 0) ? 1 : 0, via compare against
// the zero register and conditional set. Clobbers the flags.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// Convert pointer to boolean: dst = (src != NULL) ? 1 : 0. Same shape as
// convI2B but uses the 64-bit compare for the pointer. Clobbers the flags.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
13464 
// Floating-point precision changes and FP <-> integer conversions.
// Each is a single hardware conversion instruction; the fcvtz* forms
// round toward zero as required by Java semantics, and scvtf* convert
// signed integers to FP. All are costed above simple ALU ops.

// double -> float (narrowing).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// float -> double (widening).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// float -> int, round toward zero.
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// float -> long, round toward zero.
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

// int -> float (signed convert).
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// long -> float (signed convert).
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// double -> int, round toward zero.
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// double -> long, round toward zero.
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// int -> double (signed convert).
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// long -> double (signed convert).
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
13594 
// stack <-> reg and reg <-> reg shuffles with no conversion
// These reinterpret the raw bit pattern of a value in another register
// file or stack slot; no numeric conversion is performed.

// Load a 32-bit float stack slot into an int register.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load an int stack slot into a float register.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Load a 64-bit double stack slot into a long register.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load a long stack slot into a double register.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13668 
// Store a float register's bit pattern to an int stack slot.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store an int register's bit pattern to a float stack slot.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13704 
// Store a double register's bit pattern to a long stack slot.
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Format corrected to "$src, $dst": the emitted instruction stores the
  // source FP register to the destination stack slot, and every sibling
  // store pattern (MoveF2I/MoveI2F/MoveL2D _reg_stack) prints src first.
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13722 
// Store a long register's bit pattern to a double stack slot.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13740 
// Direct bit-pattern moves between the FP/SIMD and general register
// files using fmov; no memory traffic and no value conversion.

// float bits -> int register.
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// int bits -> float register.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// double bits -> long register.
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// long bits -> double register.
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
13812 
13813 // ============================================================================
13814 // clearing of an array
13815 
13816 instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
13817 %{
13818   match(Set dummy (ClearArray cnt base));
13819   effect(USE_KILL cnt, USE_KILL base);
13820 
13821   ins_cost(4 * INSN_COST);
13822   format %{ "ClearArray $cnt, $base" %}
13823 
13824   ins_encode %{
13825     __ zero_words($base$$Register, $cnt$$Register);
13826   %}
13827 
13828   ins_pipe(pipe_class_memory);
13829 %}
13830 
13831 instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, iRegL_R11 tmp, Universe dummy, rFlagsReg cr)
13832 %{
13833   match(Set dummy (ClearArray cnt base));
13834   effect(USE_KILL base, TEMP tmp);
13835 
13836   ins_cost(4 * INSN_COST);
13837   format %{ "ClearArray $cnt, $base" %}
13838 
13839   ins_encode %{
13840     __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
13841   %}
13842 
13843   ins_pipe(pipe_class_memory);
13844 %}
13845 
13846 // ============================================================================
13847 // Overflow Math Instructions
13848 
13849 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13850 %{
13851   match(Set cr (OverflowAddI op1 op2));
13852 
13853   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13854   ins_cost(INSN_COST);
13855   ins_encode %{
13856     __ cmnw($op1$$Register, $op2$$Register);
13857   %}
13858 
13859   ins_pipe(icmp_reg_reg);
13860 %}
13861 
13862 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13863 %{
13864   match(Set cr (OverflowAddI op1 op2));
13865 
13866   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13867   ins_cost(INSN_COST);
13868   ins_encode %{
13869     __ cmnw($op1$$Register, $op2$$constant);
13870   %}
13871 
13872   ins_pipe(icmp_reg_imm);
13873 %}
13874 
13875 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13876 %{
13877   match(Set cr (OverflowAddL op1 op2));
13878 
13879   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13880   ins_cost(INSN_COST);
13881   ins_encode %{
13882     __ cmn($op1$$Register, $op2$$Register);
13883   %}
13884 
13885   ins_pipe(icmp_reg_reg);
13886 %}
13887 
13888 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13889 %{
13890   match(Set cr (OverflowAddL op1 op2));
13891 
13892   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13893   ins_cost(INSN_COST);
13894   ins_encode %{
13895     __ cmn($op1$$Register, $op2$$constant);
13896   %}
13897 
13898   ins_pipe(icmp_reg_imm);
13899 %}
13900 
13901 instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13902 %{
13903   match(Set cr (OverflowSubI op1 op2));
13904 
13905   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
13906   ins_cost(INSN_COST);
13907   ins_encode %{
13908     __ cmpw($op1$$Register, $op2$$Register);
13909   %}
13910 
13911   ins_pipe(icmp_reg_reg);
13912 %}
13913 
13914 instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13915 %{
13916   match(Set cr (OverflowSubI op1 op2));
13917 
13918   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
13919   ins_cost(INSN_COST);
13920   ins_encode %{
13921     __ cmpw($op1$$Register, $op2$$constant);
13922   %}
13923 
13924   ins_pipe(icmp_reg_imm);
13925 %}
13926 
13927 instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13928 %{
13929   match(Set cr (OverflowSubL op1 op2));
13930 
13931   format %{ "cmp   $op1, $op2\t# overflow check long" %}
13932   ins_cost(INSN_COST);
13933   ins_encode %{
13934     __ cmp($op1$$Register, $op2$$Register);
13935   %}
13936 
13937   ins_pipe(icmp_reg_reg);
13938 %}
13939 
13940 instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13941 %{
13942   match(Set cr (OverflowSubL op1 op2));
13943 
13944   format %{ "cmp   $op1, $op2\t# overflow check long" %}
13945   ins_cost(INSN_COST);
13946   ins_encode %{
13947     __ cmp($op1$$Register, $op2$$constant);
13948   %}
13949 
13950   ins_pipe(icmp_reg_imm);
13951 %}
13952 
13953 instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
13954 %{
13955   match(Set cr (OverflowSubI zero op1));
13956 
13957   format %{ "cmpw  zr, $op1\t# overflow check int" %}
13958   ins_cost(INSN_COST);
13959   ins_encode %{
13960     __ cmpw(zr, $op1$$Register);
13961   %}
13962 
13963   ins_pipe(icmp_reg_imm);
13964 %}
13965 
13966 instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
13967 %{
13968   match(Set cr (OverflowSubL zero op1));
13969 
13970   format %{ "cmp   zr, $op1\t# overflow check long" %}
13971   ins_cost(INSN_COST);
13972   ins_encode %{
13973     __ cmp(zr, $op1$$Register);
13974   %}
13975 
13976   ins_pipe(icmp_reg_imm);
13977 %}
13978 
// Multiply-overflow checks. AArch64 multiplies do not set flags, so the
// overflow condition is synthesized: compute the wide product, compare it
// against its own sign-extension (mismatch => overflow), then (for the
// non-branch forms) convert that NE/EQ result into the V flag that the
// generic cmpOp condition expects, via the 0x80000000 - 1 trick.

instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form used when the overflow check directly feeds a branch: the
// predicate restricts it to overflow/no_overflow tests so the NE/EQ
// comparison result can drive the branch directly, skipping the V-flag
// synthesis above.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

// Long multiply overflow: the 128-bit product's high half (smulh) must
// equal the sign-extension of the low half (mul) for no overflow.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #31\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused branch form of the long multiply-overflow check (see the int
// branch form above for the rationale).
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #31\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
14068 
14069 // ============================================================================
14070 // Compare Instructions
14071 
14072 instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
14073 %{
14074   match(Set cr (CmpI op1 op2));
14075 
14076   effect(DEF cr, USE op1, USE op2);
14077 
14078   ins_cost(INSN_COST);
14079   format %{ "cmpw  $op1, $op2" %}
14080 
14081   ins_encode(aarch64_enc_cmpw(op1, op2));
14082 
14083   ins_pipe(icmp_reg_reg);
14084 %}
14085 
14086 instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
14087 %{
14088   match(Set cr (CmpI op1 zero));
14089 
14090   effect(DEF cr, USE op1);
14091 
14092   ins_cost(INSN_COST);
14093   format %{ "cmpw $op1, 0" %}
14094 
14095   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
14096 
14097   ins_pipe(icmp_reg_imm);
14098 %}
14099 
14100 instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
14101 %{
14102   match(Set cr (CmpI op1 op2));
14103 
14104   effect(DEF cr, USE op1);
14105 
14106   ins_cost(INSN_COST);
14107   format %{ "cmpw  $op1, $op2" %}
14108 
14109   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
14110 
14111   ins_pipe(icmp_reg_imm);
14112 %}
14113 
14114 instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
14115 %{
14116   match(Set cr (CmpI op1 op2));
14117 
14118   effect(DEF cr, USE op1);
14119 
14120   ins_cost(INSN_COST * 2);
14121   format %{ "cmpw  $op1, $op2" %}
14122 
14123   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
14124 
14125   ins_pipe(icmp_reg_imm);
14126 %}
14127 
// Unsigned compare Instructions; really, same as signed compare
// except it should only be used to feed an If or a CMovI which takes a
// cmpOpU.

instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned compare against zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Immediate encodable directly in the cmp (add/sub-immediate form).
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Arbitrary immediate: costed as two instructions (constant may need
// materializing).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14187 
// Signed long (64-bit) compares.

instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compare against zero (format shows it as a flag test of op1).
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Immediate encodable directly in the cmp (add/sub-immediate form).
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Arbitrary immediate: costed as two instructions (constant may need
// materializing).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14243 
// Unsigned long compares: same encodings as the signed forms, but the
// result is typed rFlagsRegU so consumers use the unsigned conditions.

instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compare against zero (format shows it as a flag test of op1).
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Immediate encodable directly in the cmp (add/sub-immediate form).
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Arbitrary immediate: costed as two instructions (constant may need
// materializing).
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14299 
// Pointer and compressed-pointer compares (unsigned flag semantics).

instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Null check on an uncompressed pointer.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Null check on a compressed pointer.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
14355 
// FP comparisons
//
// n.b. CmpF/CmpD set a normal flags reg which then gets compared
// using normal cmpOp. See declaration of rFlagsReg for details.

instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Compare against the +0.0 immediate form of fcmp.
// NOTE(review): the 0.0D literal suffix is not standard C++ in the
// generated code — presumably accepted by the build compiler; confirm.
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0D);
  %}

  ins_pipe(pipe_class_compare);
%}
// FROM HERE

instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Compare against the +0.0 immediate form of fcmp (see note above on the
// 0.0D literal suffix).
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0D);
  %}

  ins_pipe(pipe_class_compare);
%}
14417 
// Three-way FP compare producing -1/0/+1 in an int register (CmpF3/CmpD3).
// csinv installs 0 on EQ else -1; csneg then keeps -1 on LT (which is
// also taken for unordered) else negates to +1.
// NOTE(review): the `done` label is bound but never branched to in all
// four variants — apparently dead code left from an earlier version.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}

// Double variant of the three-way compare above.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}

// Three-way float compare against 0.0 (fcmp immediate-zero form).
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0D);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}

// Three-way double compare against 0.0 (fcmp immediate-zero form).
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0D);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}
14525 
// Manifest a CmpL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  // "\n\t" separators added: the original adjacent string literals
  // concatenated into one unreadable disassembly line.
  format %{
      "cmp $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, $dst, lt"
  %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);   // flags from src1 - src2
    __ csetw($dst$$Register, Assembler::NE);    // dst = (src1 != src2) ? 1 : 0
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT); // negate if src1 < src2
  %}

  ins_pipe(ialu_reg_reg);
%}
14548 
// CmpLTMask: dst = (p < q, signed) ? -1 : 0.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    // csetw leaves 1 if p < q else 0; subtracting from zero turns that
    // into the all-ones mask (-1) or 0.
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14569 
// CmpLTMask against zero: an arithmetic shift right by 31 replicates the
// sign bit through the word, yielding -1 when src < 0 and 0 otherwise,
// in a single instruction and without touching the flags.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14585 
14586 // ============================================================================
14587 // Max and Min
14588 
// Signed int minimum: dst = (src1 < src2) ? src1 : src2, via a compare
// and a conditional select on LT.  size(8) == two 4-byte instructions.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 lt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
14613 // FROM HERE
14614 
// Signed int maximum: dst = (src1 > src2) ? src1 : src2, via a compare
// and a conditional select on GT.  size(8) == two 4-byte instructions.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 gt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
14639 
14640 // ============================================================================
14641 // Branch Instructions
14642 
14643 // Direct Branch.
// Unconditional PC-relative branch for the ideal Goto node.
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
14657 
14658 // Conditional Near Branch
// Signed conditional branch: condition code is supplied by the cmpOp
// operand, flags by a preceding compare that produced cr.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
14678 
14679 // Conditional Near Branch Unsigned
// Unsigned conditional branch: identical shape to branchCon but takes
// the unsigned condition-code operand and unsigned flags register.
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
14699 
14700 // Make use of CBZ and CBNZ.  These instructions, as well as being
14701 // shorter than (cmp; branch), have the additional benefit of not
14702 // killing the flags.
14703 
// Fuse "compare int against 0 then branch" into cbzw/cbnzw.  Only eq/ne
// tests are matched (predicate); the flags register is left untouched.
instruct cmpI_imm0_branch(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14722 
// Fuse "compare long against 0 then branch" into 64-bit cbz/cbnz.  Only
// eq/ne tests are matched (predicate); the flags register is not killed.
instruct cmpL_imm0_branch(cmpOp cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14741 
// Fuse a pointer null-check-and-branch into 64-bit cbz/cbnz.  Only
// eq/ne tests are matched (predicate); flags are preserved.
instruct cmpP_imm0_branch(cmpOp cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14760 
// Fuse a compressed-oop null-check-and-branch into 32-bit cbzw/cbnzw
// (narrow oops occupy the low word).  Only eq/ne tests are matched.
instruct cmpN_imm0_branch(cmpOp cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14779 
// Null check of a DecodeN'd oop: the decoded pointer is null iff the
// narrow oop is zero, so test the narrow register directly with
// cbzw/cbnzw and skip the decode.  Only eq/ne tests are matched.
// NOTE(review): assumes a zero narrow oop always decodes to null —
// confirm against the heap-base/shift configuration.
instruct cmpP_narrowOop_imm0_branch(cmpOp cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14798 
// Unsigned int compare against zero fused into cbzw/cbnzw.  Besides
// eq/ne, gt/le are also collapsible: unsigned u > 0 <=> u != 0 and
// u <= 0 <=> u == 0.
instruct cmpUI_imm0_branch(cmpOpU cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  // (Whitespace normalized to match the cmpUL_imm0_branch twin below.)
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq
            || n->in(1)->as_Bool()->_test._test == BoolTest::gt
            || n->in(1)->as_Bool()->_test._test == BoolTest::le);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // EQ (== 0) and LS (unsigned <= 0) both mean "branch when zero".
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14819 
// Unsigned long compare against zero fused into cbz/cbnz.  Besides
// eq/ne, gt/le are also collapsible: unsigned u > 0 <=> u != 0 and
// u <= 0 <=> u == 0.
instruct cmpUL_imm0_branch(cmpOpU cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq
            || n->in(1)->as_Bool()->_test._test == BoolTest::gt
            || n->in(1)->as_Bool()->_test._test == BoolTest::le);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // EQ (== 0) and LS (unsigned <= 0) both mean "branch when zero".
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14840 
14841 // Test bit and Branch
14842 
14843 // Patterns for short (< 32KiB) variants
// Long lt/ge comparison against zero is just a test of the sign bit:
// tbnz bit 63 for lt (NE on the bit), tbz for ge (EQ).  Short variant,
// target within +/-32KiB.
instruct cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14861 
// Int lt/ge comparison against zero is just a test of the sign bit:
// tbnz bit 31 for lt (NE on the bit), tbz for ge (EQ).  Short variant,
// target within +/-32KiB.
instruct cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14879 
// eq/ne test of (op1 & single-bit-mask) against zero becomes a tbz/tbnz
// on that bit.  The predicate requires the mask to be a power of two;
// exact_log2 recovers the bit index.  Short variant.
instruct cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14898 
// Int version of the single-bit test-and-branch: eq/ne of
// (op1 & power-of-two mask) against zero becomes tbz/tbnz on the bit.
// Short variant.
instruct cmpI_branch_bit(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14917 
14918 // And far variants
// Far variant of cmpL_branch_sign: passes /*far*/true so the macro
// assembler can reach targets beyond the +/-32KiB tbz/tbnz range.
instruct far_cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14935 
// Far variant of cmpI_branch_sign: passes /*far*/true so the macro
// assembler can reach targets beyond the +/-32KiB tbz/tbnz range.
instruct far_cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14952 
// Far variant of cmpL_branch_bit: single-bit test of a power-of-two
// mask, with /*far*/true for targets beyond the tbz/tbnz range.
instruct far_cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14970 
// Far variant of cmpI_branch_bit: single-bit test of a power-of-two
// mask, with /*far*/true for targets beyond the tbz/tbnz range.
instruct far_cmpI_branch_bit(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14988 
14989 // Test bits
14990 
// Set flags for (op1 & imm) compared against 0 via a single 64-bit tst,
// valid only when the mask can be encoded as a logical immediate
// (predicate).
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
15003 
// Set flags for (op1 & imm) compared against 0 via a single 32-bit tstw,
// valid only when the mask can be encoded as a logical immediate
// (predicate).
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  // Format fixed from "tst" to "tstw": the encoding emits the 32-bit
  // test instruction.
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
15016 
// Register-register form: set flags for (op1 & op2) vs 0 with 64-bit tst.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
15027 
// Register-register form: set flags for (op1 & op2) vs 0 with 32-bit tstw.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
15038 
15039 
15040 // Conditional Far Branch
15041 // Conditional Far Branch Unsigned
15042 // TODO: fixme
15043 
15044 // counted loop end branch near
// Counted-loop back-branch (signed); same encoding as branchCon.
// NOTE(review): uses pipe_branch while branchCon uses pipe_branch_cond —
// possibly intentional scheduling modelling; confirm.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
15060 
15061 // counted loop end branch near Unsigned
// Counted-loop back-branch, unsigned condition codes; same encoding as
// branchConU.
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
15077 
15078 // counted loop end branch far
15079 // counted loop end branch far unsigned
15080 // TODO: fixme
15081 
15082 // ============================================================================
15083 // inlined locking and unlocking
15084 
// Fast-path monitor enter: the flag result of the inlined lock sequence
// (emitted by aarch64_enc_fast_lock) decides whether the slow path is
// taken.  tmp and tmp2 are scratch and clobbered.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
15099 
// Fast-path monitor exit, mirroring cmpFastLock; tmp and tmp2 are
// scratch and clobbered by the aarch64_enc_fast_unlock sequence.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
15112 
15113 
15114 // ============================================================================
15115 // Safepoint Instructions
15116 
15117 // TODO
15118 // provide a near and far version of this code
15119 
// Safepoint poll: load from the polling page; the poll_type relocation
// lets the VM identify the instruction when the page is protected.
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
15132 
15133 
15134 // ============================================================================
15135 // Procedure Call/Return Instructions
15136 
15137 // Call Java Static Instruction
15138 
// Direct static Java call (non-method-handle case; the method-handle
// variant below matches when is_method_handle_invoke() is true).
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  predicate(!((CallStaticJavaNode*)n)->is_method_handle_invoke());

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15156 
15157 // TO HERE
15158 
15159 // Call Java Static Instruction (method handle version)
15160 
// Method-handle variant of the direct static Java call; selected when
// is_method_handle_invoke() is true, complementing CallStaticJavaDirect.
instruct CallStaticJavaDirectHandle(method meth, iRegP_FP reg_mh_save)
%{
  match(CallStaticJava);

  effect(USE meth);

  predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// (methodhandle) ==> " %}

  ins_encode( aarch64_enc_java_handle_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15178 
15179 // Call Java Dynamic Instruction
// Dynamic (virtual/interface dispatch) Java call.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15195 
15196 // Call Runtime Instruction
15197 
// Call into the VM runtime (non-leaf).
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15212 
15213 // Call Runtime Instruction
15214 
// Leaf runtime call (no safepoint, no Java frame walking); shares the
// java_to_runtime encoding with CallRuntimeDirect.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15229 
15230 // Call Runtime Instruction
15231 
// Leaf runtime call that does not use floating point; encoding is the
// same as CallLeafDirect on this port.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15246 
15247 // Tail Call; Jump from runtime stub to Java code.
15248 // Also known as an 'interprocedural jump'.
15249 // Target of jump will eventually return to caller.
15250 // TailJump below removes the return address.
// Interprocedural jump from a runtime stub into Java code; the target of
// the jump eventually returns to the original caller (see comment above).
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
15263 
// Tail jump carrying an exception oop (pinned to r0); unlike TailCall
// this removes the return address (see comment above TailCalljmpInd).
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
15276 
15277 // Create exception oop: created by stack-crawling runtime code.
15278 // Created exception is now available to this handler, and is setup
15279 // just prior to jumping to this handler. No code emitted.
15280 // TODO check
15281 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Bind the exception oop produced by stack-crawling runtime code to r0;
// purely a register-allocation artifact — size(0), nothing is emitted.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
15294 
15295 // Rethrow exception: The exception oop will come in the first
15296 // argument position. Then JUMP (not call) to the rethrow stub code.
// Jump (not call) to the rethrow stub; the exception oop arrives in the
// first argument position (see comment above).
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
15307 
15308 
15309 // Return Instruction
15310 // epilog node loads ret address into lr as part of frame pop
// Method return; the epilog has already restored lr (see comment above).
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
15321 
15322 // Die now.
// Halt node: trap unconditionally via a breakpoint instruction with an
// arbitrary immediate (999); marked TODO to become a proper trap call.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // TODO
    // implement proper trap call here
    __ brk(999);
  %}

  ins_pipe(pipe_class_default);
%}
15337 
15338 // ============================================================================
// Partial Subtype Check
//
// Slow-path subtype test: search the subklass's secondary (2-D)
// superklass array for an instance of the superklass.  Set a hidden
// internal cache on a hit (cache is checked with exposed code in
// gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
// encoding ALSO sets flags.
15345 
// Partial subtype check producing a result register; operands are pinned
// to r4/r0/r2/r5 for the shared encoding.  The opcode value is consumed
// by aarch64_enc_partial_subtype_check to decide whether to zero the
// result register on a hit.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
15360 
// Variant used when only the flags of (PartialSubtypeCheck vs null) are
// consumed; the result register is clobbered rather than defined, and
// opcode(0x0) tells the encoding not to zero it on a hit.
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
15375 
// String.compareTo intrinsic: delegates to MacroAssembler::string_compare.
// Operands are pinned to fixed registers (r1-r4, result in r0) —
// presumably required by the assembler routine's calling convention;
// all inputs are USE_KILLed.
instruct string_compare(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15390 
// String.indexOf intrinsic with a variable pattern length: the -1
// constant tells MacroAssembler::string_indexof that the pattern length
// is dynamic (in cnt2); compare string_indexof_con below, which passes
// the known constant instead.
instruct string_indexof(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15408 
// String.indexOf intrinsic specialized for a small constant pattern
// length (immI_le_4): the count is passed as the compile-time constant
// icnt2 and zr replaces the cnt2 register.
instruct string_indexof_con(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15428 
// String.equals intrinsic: delegates to MacroAssembler::string_equals
// with operands pinned to fixed registers; inputs are USE_KILLed.
instruct string_equals(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, iRegP_R10 tmp, rFlagsReg cr)
%{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(KILL tmp, USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result    // KILL $tmp" %}
  ins_encode %{
    __ string_equals($str1$$Register, $str2$$Register,
                      $cnt$$Register, $result$$Register,
                      $tmp$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15443 
// Arrays.equals intrinsic for char arrays: delegates to
// MacroAssembler::char_arrays_equals; inputs are USE_KILLed.
instruct array_equals(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ char_arrays_equals($ary1$$Register, $ary2$$Register,
                          $result$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15457 
15458 // encode char[] to byte[] in ISO_8859_1
15459 instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
15460                           vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
15461                           vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
15462                           iRegI_R0 result, rFlagsReg cr)
15463 %{
15464   match(Set result (EncodeISOArray src (Binary dst len)));
15465   effect(USE_KILL src, USE_KILL dst, USE_KILL len,
15466          KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);
15467 
15468   format %{ "Encode array $src,$dst,$len -> $result" %}
15469   ins_encode %{
15470     __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
15471          $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
15472          $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
15473   %}
15474   ins_pipe( pipe_class_memory );
15475 %}
15476 
15477 // ============================================================================
15478 // This name is KNOWN by the ADLC and cannot be changed.
15479 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
15480 // for this guy.
15481 instruct tlsLoadP(thread_RegP dst)
15482 %{
15483   match(Set dst (ThreadLocal));
15484 
15485   ins_cost(0);
15486 
15487   format %{ " -- \t// $dst=Thread::current(), empty" %}
15488 
15489   size(0);
15490 
15491   ins_encode( /*empty*/ );
15492 
15493   ins_pipe(pipe_class_empty);
15494 %}
15495 
// ====================VECTOR INSTRUCTIONS=====================================

// Vector loads. The predicate keys on the LoadVector node's memory size so
// the matcher selects the S (32-bit), D (64-bit) or Q (128-bit) form.

// Load vector (32 bits)
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load vector (64 bits)
instruct loadV8(vecD dst, vmem8 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load Vector (128 bits)
instruct loadV16(vecX dst, vmem16 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}
15530 
// Vector stores. Mirror images of the vector loads above: the predicate
// keys on the StoreVector node's memory size to pick S/D/Q width.

// Store Vector (32 bits)
instruct storeV4(vecD src, vmem4 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (64 bits)
instruct storeV8(vecD src, vmem8 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (128 bits)
instruct storeV16(vecX src, vmem16 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
15563 
// Replicate (broadcast) a scalar into every lane of a vector register.
// The vecD forms also serve sub-64-bit vectors (e.g. length 4 bytes), hence
// the disjunctive length predicates. The *_imm forms broadcast an immediate
// via SIMD movi; the register forms use dup from a GP register. Immediates
// are masked to the lane width before being handed to the assembler.

instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    // mask to byte lane width
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    // mask to halfword lane width
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    // no masking needed: constant already fits a 32-bit lane
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15723 
// Replicate zero into both 64-bit lanes. Encoded as eor(dst, dst, dst),
// which zeroes the full 128-bit register regardless of its prior contents
// (cheaper than materializing an immediate).
// NOTE(review): this matches (ReplicateI zero) with an immI0 operand --
// confirm whether long-vector zero replication (ReplicateL) should also be
// covered by a rule like this one.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  // Fixed format: this rule produces a 2L (2 x 64-bit) vector, not 4I.
  format %{ "movi  $dst, $zero\t# vector(2L)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15737 
// Replicate (broadcast) a floating-point scalar into every lane. Unlike the
// integer forms above, the source is already in a SIMD/FP register, so dup
// takes an element rather than a GP register.

instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}

instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}

instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
15776 
// ====================VECTOR ARITHMETIC=======================================

// --------------------------------- ADD --------------------------------------
// Lane-wise vector addition. Integer forms use SIMD addv, float forms fadd.
// As with replicate, the vecD forms also cover shorter vectors, hence the
// disjunctive length predicates on the 64-bit variants.

instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15908 
// Vector add, 2 x double. Added the length()==2 predicate that was missing
// here but present on every sibling 2D rule (vsub2D, vmul2D, vdiv2D) --
// AddVD on this port only occurs for 2-element vectors (vecX / 128 bits),
// so this is a consistency fix, not a behavioral change.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15921 
15922 // --------------------------------- SUB --------------------------------------
15923 
15924 instruct vsub8B(vecD dst, vecD src1, vecD src2)
15925 %{
15926   predicate(n->as_Vector()->length() == 4 ||
15927             n->as_Vector()->length() == 8);
15928   match(Set dst (SubVB src1 src2));
15929   ins_cost(INSN_COST);
15930   format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
15931   ins_encode %{
15932     __ subv(as_FloatRegister($dst$$reg), __ T8B,
15933             as_FloatRegister($src1$$reg),
15934             as_FloatRegister($src2$$reg));
15935   %}
15936   ins_pipe(vdop64);
15937 %}
15938 
15939 instruct vsub16B(vecX dst, vecX src1, vecX src2)
15940 %{
15941   predicate(n->as_Vector()->length() == 16);
15942   match(Set dst (SubVB src1 src2));
15943   ins_cost(INSN_COST);
15944   format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
15945   ins_encode %{
15946     __ subv(as_FloatRegister($dst$$reg), __ T16B,
15947             as_FloatRegister($src1$$reg),
15948             as_FloatRegister($src2$$reg));
15949   %}
15950   ins_pipe(vdop128);
15951 %}
15952 
15953 instruct vsub4S(vecD dst, vecD src1, vecD src2)
15954 %{
15955   predicate(n->as_Vector()->length() == 2 ||
15956             n->as_Vector()->length() == 4);
15957   match(Set dst (SubVS src1 src2));
15958   ins_cost(INSN_COST);
15959   format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
15960   ins_encode %{
15961     __ subv(as_FloatRegister($dst$$reg), __ T4H,
15962             as_FloatRegister($src1$$reg),
15963             as_FloatRegister($src2$$reg));
15964   %}
15965   ins_pipe(vdop64);
15966 %}
15967 
15968 instruct vsub8S(vecX dst, vecX src1, vecX src2)
15969 %{
15970   predicate(n->as_Vector()->length() == 8);
15971   match(Set dst (SubVS src1 src2));
15972   ins_cost(INSN_COST);
15973   format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
15974   ins_encode %{
15975     __ subv(as_FloatRegister($dst$$reg), __ T8H,
15976             as_FloatRegister($src1$$reg),
15977             as_FloatRegister($src2$$reg));
15978   %}
15979   ins_pipe(vdop128);
15980 %}
15981 
15982 instruct vsub2I(vecD dst, vecD src1, vecD src2)
15983 %{
15984   predicate(n->as_Vector()->length() == 2);
15985   match(Set dst (SubVI src1 src2));
15986   ins_cost(INSN_COST);
15987   format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
15988   ins_encode %{
15989     __ subv(as_FloatRegister($dst$$reg), __ T2S,
15990             as_FloatRegister($src1$$reg),
15991             as_FloatRegister($src2$$reg));
15992   %}
15993   ins_pipe(vdop64);
15994 %}
15995 
15996 instruct vsub4I(vecX dst, vecX src1, vecX src2)
15997 %{
15998   predicate(n->as_Vector()->length() == 4);
15999   match(Set dst (SubVI src1 src2));
16000   ins_cost(INSN_COST);
16001   format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
16002   ins_encode %{
16003     __ subv(as_FloatRegister($dst$$reg), __ T4S,
16004             as_FloatRegister($src1$$reg),
16005             as_FloatRegister($src2$$reg));
16006   %}
16007   ins_pipe(vdop128);
16008 %}
16009 
16010 instruct vsub2L(vecX dst, vecX src1, vecX src2)
16011 %{
16012   predicate(n->as_Vector()->length() == 2);
16013   match(Set dst (SubVL src1 src2));
16014   ins_cost(INSN_COST);
16015   format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
16016   ins_encode %{
16017     __ subv(as_FloatRegister($dst$$reg), __ T2D,
16018             as_FloatRegister($src1$$reg),
16019             as_FloatRegister($src2$$reg));
16020   %}
16021   ins_pipe(vdop128);
16022 %}
16023 
16024 instruct vsub2F(vecD dst, vecD src1, vecD src2)
16025 %{
16026   predicate(n->as_Vector()->length() == 2);
16027   match(Set dst (SubVF src1 src2));
16028   ins_cost(INSN_COST);
16029   format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
16030   ins_encode %{
16031     __ fsub(as_FloatRegister($dst$$reg), __ T2S,
16032             as_FloatRegister($src1$$reg),
16033             as_FloatRegister($src2$$reg));
16034   %}
16035   ins_pipe(vdop_fp64);
16036 %}
16037 
16038 instruct vsub4F(vecX dst, vecX src1, vecX src2)
16039 %{
16040   predicate(n->as_Vector()->length() == 4);
16041   match(Set dst (SubVF src1 src2));
16042   ins_cost(INSN_COST);
16043   format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
16044   ins_encode %{
16045     __ fsub(as_FloatRegister($dst$$reg), __ T4S,
16046             as_FloatRegister($src1$$reg),
16047             as_FloatRegister($src2$$reg));
16048   %}
16049   ins_pipe(vdop_fp128);
16050 %}
16051 
16052 instruct vsub2D(vecX dst, vecX src1, vecX src2)
16053 %{
16054   predicate(n->as_Vector()->length() == 2);
16055   match(Set dst (SubVD src1 src2));
16056   ins_cost(INSN_COST);
16057   format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
16058   ins_encode %{
16059     __ fsub(as_FloatRegister($dst$$reg), __ T2D,
16060             as_FloatRegister($src1$$reg),
16061             as_FloatRegister($src2$$reg));
16062   %}
16063   ins_pipe(vdop_fp128);
16064 %}
16065 
16066 // --------------------------------- MUL --------------------------------------
16067 
16068 instruct vmul4S(vecD dst, vecD src1, vecD src2)
16069 %{
16070   predicate(n->as_Vector()->length() == 2 ||
16071             n->as_Vector()->length() == 4);
16072   match(Set dst (MulVS src1 src2));
16073   ins_cost(INSN_COST);
16074   format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
16075   ins_encode %{
16076     __ mulv(as_FloatRegister($dst$$reg), __ T4H,
16077             as_FloatRegister($src1$$reg),
16078             as_FloatRegister($src2$$reg));
16079   %}
16080   ins_pipe(vmul64);
16081 %}
16082 
16083 instruct vmul8S(vecX dst, vecX src1, vecX src2)
16084 %{
16085   predicate(n->as_Vector()->length() == 8);
16086   match(Set dst (MulVS src1 src2));
16087   ins_cost(INSN_COST);
16088   format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
16089   ins_encode %{
16090     __ mulv(as_FloatRegister($dst$$reg), __ T8H,
16091             as_FloatRegister($src1$$reg),
16092             as_FloatRegister($src2$$reg));
16093   %}
16094   ins_pipe(vmul128);
16095 %}
16096 
16097 instruct vmul2I(vecD dst, vecD src1, vecD src2)
16098 %{
16099   predicate(n->as_Vector()->length() == 2);
16100   match(Set dst (MulVI src1 src2));
16101   ins_cost(INSN_COST);
16102   format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
16103   ins_encode %{
16104     __ mulv(as_FloatRegister($dst$$reg), __ T2S,
16105             as_FloatRegister($src1$$reg),
16106             as_FloatRegister($src2$$reg));
16107   %}
16108   ins_pipe(vmul64);
16109 %}
16110 
16111 instruct vmul4I(vecX dst, vecX src1, vecX src2)
16112 %{
16113   predicate(n->as_Vector()->length() == 4);
16114   match(Set dst (MulVI src1 src2));
16115   ins_cost(INSN_COST);
16116   format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
16117   ins_encode %{
16118     __ mulv(as_FloatRegister($dst$$reg), __ T4S,
16119             as_FloatRegister($src1$$reg),
16120             as_FloatRegister($src2$$reg));
16121   %}
16122   ins_pipe(vmul128);
16123 %}
16124 
16125 instruct vmul2F(vecD dst, vecD src1, vecD src2)
16126 %{
16127   predicate(n->as_Vector()->length() == 2);
16128   match(Set dst (MulVF src1 src2));
16129   ins_cost(INSN_COST);
16130   format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
16131   ins_encode %{
16132     __ fmul(as_FloatRegister($dst$$reg), __ T2S,
16133             as_FloatRegister($src1$$reg),
16134             as_FloatRegister($src2$$reg));
16135   %}
16136   ins_pipe(vmuldiv_fp64);
16137 %}
16138 
16139 instruct vmul4F(vecX dst, vecX src1, vecX src2)
16140 %{
16141   predicate(n->as_Vector()->length() == 4);
16142   match(Set dst (MulVF src1 src2));
16143   ins_cost(INSN_COST);
16144   format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
16145   ins_encode %{
16146     __ fmul(as_FloatRegister($dst$$reg), __ T4S,
16147             as_FloatRegister($src1$$reg),
16148             as_FloatRegister($src2$$reg));
16149   %}
16150   ins_pipe(vmuldiv_fp128);
16151 %}
16152 
16153 instruct vmul2D(vecX dst, vecX src1, vecX src2)
16154 %{
16155   predicate(n->as_Vector()->length() == 2);
16156   match(Set dst (MulVD src1 src2));
16157   ins_cost(INSN_COST);
16158   format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
16159   ins_encode %{
16160     __ fmul(as_FloatRegister($dst$$reg), __ T2D,
16161             as_FloatRegister($src1$$reg),
16162             as_FloatRegister($src2$$reg));
16163   %}
16164   ins_pipe(vmuldiv_fp128);
16165 %}
16166 
16167 // --------------------------------- MLA --------------------------------------
16168 
16169 instruct vmla4S(vecD dst, vecD src1, vecD src2)
16170 %{
16171   predicate(n->as_Vector()->length() == 2 ||
16172             n->as_Vector()->length() == 4);
16173   match(Set dst (AddVS dst (MulVS src1 src2)));
16174   ins_cost(INSN_COST);
16175   format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
16176   ins_encode %{
16177     __ mlav(as_FloatRegister($dst$$reg), __ T4H,
16178             as_FloatRegister($src1$$reg),
16179             as_FloatRegister($src2$$reg));
16180   %}
16181   ins_pipe(vmla64);
16182 %}
16183 
16184 instruct vmla8S(vecX dst, vecX src1, vecX src2)
16185 %{
16186   predicate(n->as_Vector()->length() == 8);
16187   match(Set dst (AddVS dst (MulVS src1 src2)));
16188   ins_cost(INSN_COST);
16189   format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
16190   ins_encode %{
16191     __ mlav(as_FloatRegister($dst$$reg), __ T8H,
16192             as_FloatRegister($src1$$reg),
16193             as_FloatRegister($src2$$reg));
16194   %}
16195   ins_pipe(vmla128);
16196 %}
16197 
16198 instruct vmla2I(vecD dst, vecD src1, vecD src2)
16199 %{
16200   predicate(n->as_Vector()->length() == 2);
16201   match(Set dst (AddVI dst (MulVI src1 src2)));
16202   ins_cost(INSN_COST);
16203   format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
16204   ins_encode %{
16205     __ mlav(as_FloatRegister($dst$$reg), __ T2S,
16206             as_FloatRegister($src1$$reg),
16207             as_FloatRegister($src2$$reg));
16208   %}
16209   ins_pipe(vmla64);
16210 %}
16211 
16212 instruct vmla4I(vecX dst, vecX src1, vecX src2)
16213 %{
16214   predicate(n->as_Vector()->length() == 4);
16215   match(Set dst (AddVI dst (MulVI src1 src2)));
16216   ins_cost(INSN_COST);
16217   format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
16218   ins_encode %{
16219     __ mlav(as_FloatRegister($dst$$reg), __ T4S,
16220             as_FloatRegister($src1$$reg),
16221             as_FloatRegister($src2$$reg));
16222   %}
16223   ins_pipe(vmla128);
16224 %}
16225 
16226 // --------------------------------- MLS --------------------------------------
16227 
16228 instruct vmls4S(vecD dst, vecD src1, vecD src2)
16229 %{
16230   predicate(n->as_Vector()->length() == 2 ||
16231             n->as_Vector()->length() == 4);
16232   match(Set dst (SubVS dst (MulVS src1 src2)));
16233   ins_cost(INSN_COST);
16234   format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
16235   ins_encode %{
16236     __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
16237             as_FloatRegister($src1$$reg),
16238             as_FloatRegister($src2$$reg));
16239   %}
16240   ins_pipe(vmla64);
16241 %}
16242 
16243 instruct vmls8S(vecX dst, vecX src1, vecX src2)
16244 %{
16245   predicate(n->as_Vector()->length() == 8);
16246   match(Set dst (SubVS dst (MulVS src1 src2)));
16247   ins_cost(INSN_COST);
16248   format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
16249   ins_encode %{
16250     __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
16251             as_FloatRegister($src1$$reg),
16252             as_FloatRegister($src2$$reg));
16253   %}
16254   ins_pipe(vmla128);
16255 %}
16256 
16257 instruct vmls2I(vecD dst, vecD src1, vecD src2)
16258 %{
16259   predicate(n->as_Vector()->length() == 2);
16260   match(Set dst (SubVI dst (MulVI src1 src2)));
16261   ins_cost(INSN_COST);
16262   format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
16263   ins_encode %{
16264     __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
16265             as_FloatRegister($src1$$reg),
16266             as_FloatRegister($src2$$reg));
16267   %}
16268   ins_pipe(vmla64);
16269 %}
16270 
16271 instruct vmls4I(vecX dst, vecX src1, vecX src2)
16272 %{
16273   predicate(n->as_Vector()->length() == 4);
16274   match(Set dst (SubVI dst (MulVI src1 src2)));
16275   ins_cost(INSN_COST);
16276   format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
16277   ins_encode %{
16278     __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
16279             as_FloatRegister($src1$$reg),
16280             as_FloatRegister($src2$$reg));
16281   %}
16282   ins_pipe(vmla128);
16283 %}
16284 
16285 // --------------------------------- DIV --------------------------------------
16286 
16287 instruct vdiv2F(vecD dst, vecD src1, vecD src2)
16288 %{
16289   predicate(n->as_Vector()->length() == 2);
16290   match(Set dst (DivVF src1 src2));
16291   ins_cost(INSN_COST);
16292   format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
16293   ins_encode %{
16294     __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
16295             as_FloatRegister($src1$$reg),
16296             as_FloatRegister($src2$$reg));
16297   %}
16298   ins_pipe(vmuldiv_fp64);
16299 %}
16300 
16301 instruct vdiv4F(vecX dst, vecX src1, vecX src2)
16302 %{
16303   predicate(n->as_Vector()->length() == 4);
16304   match(Set dst (DivVF src1 src2));
16305   ins_cost(INSN_COST);
16306   format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
16307   ins_encode %{
16308     __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
16309             as_FloatRegister($src1$$reg),
16310             as_FloatRegister($src2$$reg));
16311   %}
16312   ins_pipe(vmuldiv_fp128);
16313 %}
16314 
16315 instruct vdiv2D(vecX dst, vecX src1, vecX src2)
16316 %{
16317   predicate(n->as_Vector()->length() == 2);
16318   match(Set dst (DivVD src1 src2));
16319   ins_cost(INSN_COST);
16320   format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
16321   ins_encode %{
16322     __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
16323             as_FloatRegister($src1$$reg),
16324             as_FloatRegister($src2$$reg));
16325   %}
16326   ins_pipe(vmuldiv_fp128);
16327 %}
16328 
16329 // --------------------------------- AND --------------------------------------
16330 
16331 instruct vand8B(vecD dst, vecD src1, vecD src2)
16332 %{
16333   predicate(n->as_Vector()->length_in_bytes() == 4 ||
16334             n->as_Vector()->length_in_bytes() == 8);
16335   match(Set dst (AndV src1 src2));
16336   ins_cost(INSN_COST);
16337   format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
16338   ins_encode %{
16339     __ andr(as_FloatRegister($dst$$reg), __ T8B,
16340             as_FloatRegister($src1$$reg),
16341             as_FloatRegister($src2$$reg));
16342   %}
16343   ins_pipe(vlogical64);
16344 %}
16345 
16346 instruct vand16B(vecX dst, vecX src1, vecX src2)
16347 %{
16348   predicate(n->as_Vector()->length_in_bytes() == 16);
16349   match(Set dst (AndV src1 src2));
16350   ins_cost(INSN_COST);
16351   format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
16352   ins_encode %{
16353     __ andr(as_FloatRegister($dst$$reg), __ T16B,
16354             as_FloatRegister($src1$$reg),
16355             as_FloatRegister($src2$$reg));
16356   %}
16357   ins_pipe(vlogical128);
16358 %}
16359 
16360 // --------------------------------- OR ---------------------------------------
16361 
16362 instruct vor8B(vecD dst, vecD src1, vecD src2)
16363 %{
16364   predicate(n->as_Vector()->length_in_bytes() == 4 ||
16365             n->as_Vector()->length_in_bytes() == 8);
16366   match(Set dst (OrV src1 src2));
16367   ins_cost(INSN_COST);
16368   format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
16369   ins_encode %{
16370     __ orr(as_FloatRegister($dst$$reg), __ T8B,
16371             as_FloatRegister($src1$$reg),
16372             as_FloatRegister($src2$$reg));
16373   %}
16374   ins_pipe(vlogical64);
16375 %}
16376 
// Bitwise OR, 128-bit vectors.
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16390 
16391 // --------------------------------- XOR --------------------------------------
16392 
16393 instruct vxor8B(vecD dst, vecD src1, vecD src2)
16394 %{
16395   predicate(n->as_Vector()->length_in_bytes() == 4 ||
16396             n->as_Vector()->length_in_bytes() == 8);
16397   match(Set dst (XorV src1 src2));
16398   ins_cost(INSN_COST);
16399   format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
16400   ins_encode %{
16401     __ eor(as_FloatRegister($dst$$reg), __ T8B,
16402             as_FloatRegister($src1$$reg),
16403             as_FloatRegister($src2$$reg));
16404   %}
16405   ins_pipe(vlogical64);
16406 %}
16407 
16408 instruct vxor16B(vecX dst, vecX src1, vecX src2)
16409 %{
16410   predicate(n->as_Vector()->length_in_bytes() == 16);
16411   match(Set dst (XorV src1 src2));
16412   ins_cost(INSN_COST);
16413   format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
16414   ins_encode %{
16415     __ eor(as_FloatRegister($dst$$reg), __ T16B,
16416             as_FloatRegister($src1$$reg),
16417             as_FloatRegister($src2$$reg));
16418   %}
16419   ins_pipe(vlogical128);
16420 %}
16421 
16422 // ------------------------------ Shift ---------------------------------------
16423 
// Materialize a variable left-shift count: broadcast the GPR count into
// every byte lane of a 128-bit vector register for use by sshl/ushl.
instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (LShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16432 
16433 // Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
// Materialize a variable right-shift count. NEON has no variable right
// shift, so the count is broadcast and then negated: sshl/ushl with a
// negative per-lane count performs the right shift.
instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
    __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16443 
// Variable shift of 8 byte lanes. A single sshl serves both LShiftVB and
// (arithmetic) RShiftVB because the RShiftCntV count was negated when
// materialized (see vshiftcntR above).
instruct vsll8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
16458 
// Variable shift of 16 byte lanes; sshl handles both left and arithmetic
// right shifts (right-shift counts are pre-negated by vshiftcntR).
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16472 
// Variable logical (unsigned) right shift of 8 byte lanes: ushl with the
// pre-negated count from vshiftcntR.
instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
16486 
// Variable logical right shift of 16 byte lanes (ushl with negated count).
instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16499 
// Immediate left shift of 8 byte lanes. The count follows Java int-shift
// masking (& 31); a count >= the 8-bit element width shifts out every bit
// and cannot be encoded by shl, so the destination is zeroed with
// eor dst,src,src instead.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
16519 
// Immediate left shift of 16 byte lanes. Counts >= 8 (the element width)
// zero the destination via eor dst,src,src; smaller counts use shl.
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16538 
// Immediate arithmetic right shift of 8 byte lanes. Counts >= 8 saturate
// to 7 (an arithmetic shift by width-1 replicates the sign bit, which is
// the correct result for any larger count). The value handed to sshr is
// then negative-encoded (-sh & 7) — NOTE(review): this matches the
// negative encoding used by the other shift-immediate rules in this file;
// confirm against the sshr assembler helper's expected immediate form.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
16554 
// Immediate arithmetic right shift of 16 byte lanes; counts >= 8 saturate
// to 7, then the count is negative-encoded (-sh & 7) for sshr, matching
// the convention of the sibling immediate-shift rules in this file.
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
16569 
// Immediate logical right shift of 8 byte lanes. Counts >= 8 shift out all
// bits, so the destination is zeroed with eor dst,src,src; otherwise ushr
// is used with the negative-encoded count (-sh & 7), the same immediate
// convention as the other shift rules here.
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(vshift64_imm);
%}
16589 
// Immediate logical right shift of 16 byte lanes; counts >= 8 zero the
// destination, smaller counts use ushr with the negative-encoded count.
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16608 
// Variable shift of 4 halfword (16-bit) lanes; one sshl serves both left
// and arithmetic right shifts (right counts pre-negated by vshiftcntR).
instruct vsll4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
16623 
// Variable shift of 8 halfword lanes (left and arithmetic right via sshl).
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16637 
// Variable logical right shift of 4 halfword lanes (ushl, negated count).
instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
16651 
// Variable logical right shift of 8 halfword lanes (ushl, negated count).
instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16664 
// Immediate left shift of 4 halfword lanes. Counts >= 16 (element width)
// zero the destination via eor dst,src,src; otherwise shl encodes directly.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
16684 
// Immediate left shift of 8 halfword lanes; counts >= 16 zero the
// destination, smaller counts use shl.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16703 
// Immediate arithmetic right shift of 4 halfword lanes. Counts >= 16
// saturate to 15 (sign replication), then the count is negative-encoded
// (-sh & 15) for sshr, per this file's immediate-shift convention.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) sh = 15;
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
16719 
// Immediate arithmetic right shift of 8 halfword lanes; counts >= 16
// saturate to 15, then negative-encoded for sshr.
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) sh = 15;
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
16734 
// Immediate logical right shift of 4 halfword lanes. Counts >= 16 zero the
// destination; otherwise ushr with the negative-encoded count (-sh & 15).
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(vshift64_imm);
%}
16754 
// Immediate logical right shift of 8 halfword lanes; counts >= 16 zero the
// destination, smaller counts use ushr with the negative-encoded count.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16773 
// Variable shift of 2 word (32-bit) lanes; one sshl serves both left and
// arithmetic right shifts (right counts pre-negated by vshiftcntR).
instruct vsll2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
16787 
// Variable shift of 4 word lanes (left and arithmetic right via sshl).
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16801 
// Variable logical right shift of 2 word lanes (ushl, negated count).
instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
16814 
// Variable logical right shift of 4 word lanes (ushl, negated count).
instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16827 
// Immediate left shift of 2 word lanes. The & 31 mask matches Java int
// shift semantics; no zeroing branch is needed because the masked count
// is always < the 32-bit element width.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}
16840 
// Immediate left shift of 4 word lanes (count masked to 0..31, always
// encodable for 32-bit elements).
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
16853 
// Immediate arithmetic right shift of 2 word lanes; the count is
// negative-encoded (-c & 31) for sshr, per this file's convention.
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}
16866 
// Immediate arithmetic right shift of 4 word lanes (negative-encoded count).
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
16879 
// Immediate logical right shift of 2 word lanes (negative-encoded count).
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}
16892 
// Immediate logical right shift of 4 word lanes (negative-encoded count).
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
16905 
// Variable shift of 2 doubleword (64-bit) lanes; one sshl serves both left
// and arithmetic right shifts (right counts pre-negated by vshiftcntR).
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16919 
// Variable logical right shift of 2 doubleword lanes (ushl, negated count).
instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16932 
// Immediate left shift of 2 doubleword lanes. The & 63 mask matches Java
// long shift semantics; the masked count is always < the 64-bit width.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}
16945 
// Immediate arithmetic right shift of 2 doubleword lanes; the count is
// negative-encoded (-c & 63) for sshr, per this file's convention.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}
16958 
// Immediate logical right shift of 2 doubleword lanes (negative-encoded
// count, -c & 63).
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}
16971 
16972 //----------PEEPHOLE RULES-----------------------------------------------------
16973 // These must follow all instruction definitions as they use the names
16974 // defined in the instructions definitions.
16975 //
16976 // peepmatch ( root_instr_name [preceding_instruction]* );
16977 //
16978 // peepconstraint %{
16979 // (instruction_number.operand_name relational_op instruction_number.operand_name
16980 //  [, ...] );
16981 // // instruction numbers are zero-based using left to right order in peepmatch
16982 //
16983 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
16984 // // provide an instruction_number.operand_name for each operand that appears
16985 // // in the replacement instruction's match rule
16986 //
16987 // ---------VM FLAGS---------------------------------------------------------
16988 //
16989 // All peephole optimizations can be turned off using -XX:-OptoPeephole
16990 //
16991 // Each peephole rule is given an identifying number starting with zero and
16992 // increasing by one in the order seen by the parser.  An individual peephole
16993 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
16994 // on the command-line.
16995 //
16996 // ---------CURRENT LIMITATIONS----------------------------------------------
16997 //
16998 // Only match adjacent instructions in same basic block
16999 // Only equality constraints
17000 // Only constraints between operands, not (0.dest_reg == RAX_enc)
17001 // Only one replacement instruction
17002 //
17003 // ---------EXAMPLE----------------------------------------------------------
17004 //
17005 // // pertinent parts of existing instructions in architecture description
17006 // instruct movI(iRegINoSp dst, iRegI src)
17007 // %{
17008 //   match(Set dst (CopyI src));
17009 // %}
17010 //
17011 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
17012 // %{
17013 //   match(Set dst (AddI dst src));
17014 //   effect(KILL cr);
17015 // %}
17016 //
17017 // // Change (inc mov) to lea
17018 // peephole %{
//   // increment preceded by register-register move
17020 //   peepmatch ( incI_iReg movI );
17021 //   // require that the destination register of the increment
17022 //   // match the destination register of the move
17023 //   peepconstraint ( 0.dst == 1.dst );
17024 //   // construct a replacement instruction that sets
17025 //   // the destination to ( move's source register + one )
17026 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
17027 // %}
17028 //
17029 
17030 // Implementation no longer uses movX instructions since
17031 // machine-independent system no longer uses CopyX nodes.
17032 //
17033 // peephole
17034 // %{
17035 //   peepmatch (incI_iReg movI);
17036 //   peepconstraint (0.dst == 1.dst);
17037 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17038 // %}
17039 
17040 // peephole
17041 // %{
17042 //   peepmatch (decI_iReg movI);
17043 //   peepconstraint (0.dst == 1.dst);
17044 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17045 // %}
17046 
17047 // peephole
17048 // %{
17049 //   peepmatch (addI_iReg_imm movI);
17050 //   peepconstraint (0.dst == 1.dst);
17051 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17052 // %}
17053 
17054 // peephole
17055 // %{
17056 //   peepmatch (incL_iReg movL);
17057 //   peepconstraint (0.dst == 1.dst);
17058 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17059 // %}
17060 
17061 // peephole
17062 // %{
17063 //   peepmatch (decL_iReg movL);
17064 //   peepconstraint (0.dst == 1.dst);
17065 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17066 // %}
17067 
17068 // peephole
17069 // %{
17070 //   peepmatch (addL_iReg_imm movL);
17071 //   peepconstraint (0.dst == 1.dst);
17072 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17073 // %}
17074 
17075 // peephole
17076 // %{
17077 //   peepmatch (addP_iReg_imm movP);
17078 //   peepconstraint (0.dst == 1.dst);
17079 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
17080 // %}
17081 
17082 // // Change load of spilled value to only a spill
17083 // instruct storeI(memory mem, iRegI src)
17084 // %{
17085 //   match(Set mem (StoreI mem src));
17086 // %}
17087 //
17088 // instruct loadI(iRegINoSp dst, memory mem)
17089 // %{
17090 //   match(Set dst (LoadI mem));
17091 // %}
17092 //
17093 
17094 //----------SMARTSPILL RULES---------------------------------------------------
17095 // These must follow all instruction definitions as they use the names
17096 // defined in the instructions definitions.
17097 
17098 // Local Variables:
17099 // mode: c++
17100 // End: