1 //
   2 // Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
// as regards Java usage, we don't use any callee save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
// General Registers

// Each 64 bit integer register is described as two 32 bit pieces: the
// real low half (e.g. R0) and a virtual high half (R0_H) that exists
// only for the register allocator's bookkeeping.
//
// Column 1 is the register save type seen by the allocator (Java
// code), column 2 the C convention save type: r19-r26 are callee save
// (SOE) under the C ABI but treated as caller save (SOC) for Java.
// r8 and r9 have no reg_def at all, keeping them invisible to the
// allocator for use as scratch registers.
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// r27-r31 are fixed-purpose: no-save for the allocator, hence never
// allocated to Java values.
reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
 155 // AArch64 has 32 floating-point registers. Each can store a vector of
 156 // single or double precision floating-point values up to 8 * 32
 157 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 158 // use the first float or double element of the vector.
 159 
// For Java use, float registers v0-v15 are always treated as save-on-call
// (even though the platform ABI treats v8-v15 as callee save). Float
// registers v16-v31 are SOC as per the platform spec.
 163 
  // Each FP/SIMD register vN is described as four 32 bit slices: the
  // real low word (VN) plus virtual words VN_H, VN_J and VN_K mapped
  // via as_VMReg()->next(1..3), letting the allocator track float
  // (1 slice), double (2 slices) and 128 bit vector (4 slices) uses.
  // All are SOC for both columns per the note above.
  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
// the AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. the FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
 333 
 334 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
// Allocation order for the general registers: scratch/volatile
// registers first, then argument registers (last so fixed calling
// sequences come late), then the C callee-saves, with the fixed
// non-allocatable registers at the very end.
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);
 385 
// Allocation order for the FP/SIMD registers: v16-v31 (no-save for
// the C ABI) first, then the argument registers v0-v7, then v8-v15
// (callee save under the platform ABI) last.
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
 426 
 427 alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
// Class for all 32 bit integer registers -- excludes SP which will
// never be used as an integer register
// (lists the low halves R0-R30 only; r8/r9 have no reg_def and so
//  can never appear in any class)
reg_class any_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30
);

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);
 483 
// Class for all long integer registers (including SP, i.e. R31)
reg_class any_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 517 
// Class for all non-special integer registers
// Two variants (without/with fp, R29) are defined; the
// reg_class_dynamic below selects between them at runtime based on
// PreserveFramePointer.
reg_class no_special_reg32_no_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);

// Same as above but with fp (R29) allocatable.
reg_class no_special_reg32_with_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
    R29,                        // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);

reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
// Class for all non-special long integer registers
// (both 32-bit halves of each register; the with_fp/no_fp split and
//  dynamic selection mirror the 32-bit classes above)
reg_class no_special_reg_no_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

// Same as above but with fp (R29) allocatable.
reg_class no_special_reg_with_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
    R29, R29_H,                 // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
// Singleton classes pinning specific 64 bit registers (both 32-bit
// halves) for instructions/operands that require a fixed register.

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register (r12)
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register (r27)
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register (r28)
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register (r29)
reg_class fp_reg(
    R29, R29_H
);

// Class for link register (r30)
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register (r31)
reg_class sp_reg(
  R31, R31_H
);
 725 
// Class for all pointer registers (every defined general register,
// including the special-purpose ones and sp)
reg_class ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 759 
// Class for all non_special pointer registers
// (as ptr_reg but excluding heapbase, thread, fp, lr and sp)
reg_class no_special_ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 793 
// Class for all float registers (only the low 32-bit word of each
// vector register)
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
 829 
// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers (two 32-bit slices per register)
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 867 
// Class for all 64bit vector registers (two 32-bit slices per
// register, same coverage as double_reg)
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
 904 // Class for all 128bit vector registers
 905 reg_class vectorx_reg(
 906     V0, V0_H, V0_J, V0_K,
 907     V1, V1_H, V1_J, V1_K,
 908     V2, V2_H, V2_J, V2_K,
 909     V3, V3_H, V3_J, V3_K,
 910     V4, V4_H, V4_J, V4_K,
 911     V5, V5_H, V5_J, V5_K,
 912     V6, V6_H, V6_J, V6_K,
 913     V7, V7_H, V7_J, V7_K,
 914     V8, V8_H, V8_J, V8_K,
 915     V9, V9_H, V9_J, V9_K,
 916     V10, V10_H, V10_J, V10_K,
 917     V11, V11_H, V11_J, V11_K,
 918     V12, V12_H, V12_J, V12_K,
 919     V13, V13_H, V13_J, V13_K,
 920     V14, V14_H, V14_J, V14_K,
 921     V15, V15_H, V15_J, V15_K,
 922     V16, V16_H, V16_J, V16_K,
 923     V17, V17_H, V17_J, V17_K,
 924     V18, V18_H, V18_J, V18_K,
 925     V19, V19_H, V19_J, V19_K,
 926     V20, V20_H, V20_J, V20_K,
 927     V21, V21_H, V21_J, V21_K,
 928     V22, V22_H, V22_J, V22_K,
 929     V23, V23_H, V23_J, V23_K,
 930     V24, V24_H, V24_J, V24_K,
 931     V25, V25_H, V25_J, V25_K,
 932     V26, V26_H, V26_J, V26_K,
 933     V27, V27_H, V27_J, V27_K,
 934     V28, V28_H, V28_J, V28_K,
 935     V29, V29_H, V29_J, V29_K,
 936     V30, V30_H, V30_J, V30_K,
 937     V31, V31_H, V31_J, V31_K
 938 );
 939 
 940 // Class for 128 bit register v0
 941 reg_class v0_reg(
 942     V0, V0_H
 943 );
 944 
 945 // Class for 128 bit register v1
 946 reg_class v1_reg(
 947     V1, V1_H
 948 );
 949 
 950 // Class for 128 bit register v2
 951 reg_class v2_reg(
 952     V2, V2_H
 953 );
 954 
 955 // Class for 128 bit register v3
 956 reg_class v3_reg(
 957     V3, V3_H
 958 );
 959 
 960 // Singleton class for condition codes
 961 reg_class int_flags(RFLAGS);
 962 
 963 %}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
 977 // we follow the ppc-aix port in using a simple cost model which ranks
 978 // register operations as cheap, memory ops as more expensive and
 979 // branches as most expensive. the first two have a low as well as a
 980 // normal cost. huge cost appears to be a way of saying don't do
 981 // something
 982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls are ranked twice as expensive as a register op.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile memory references carry a much higher cost to discourage
  // the matcher from duplicating them.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "gc/shared/cardTableModRefBS.hpp"
 999 
// Call trampoline stub support. Both queries report zero because this
// platform emits calls without trampoline stubs.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1017 
// Emission and sizing of the exception and deoptimization handler
// stubs appended to each compiled method.
class HandlerImpl {

 public:

  // emit the handler code into cbuf; return its start offset
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // worst-case size of the exception handler (a single far branch)
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // NOTE(review): 4 words presumably allows for the worst-case far
    // branch encoding (adr + up-to-3-insn branch) -- confirm against
    // emit_deopt_handler
    return 4 * NativeInstruction::instruction_size;
  }
};
1034 
  // graph traversal helpers
  //
  // locate the membar directly above/below n via its Ctl and Mem
  // projections, returning NULL when no such membar exists

  MemBarNode *parent_membar(const Node *n);
  MemBarNode *child_membar(const MemBarNode *n);
  // true iff barrier can head a volatile put sequence
  bool leading_membar(const MemBarNode *barrier);

  // true iff barrier is the StoreLoad membar of a GC card mark
  bool is_card_mark_membar(const MemBarNode *barrier);

  // walk between the membars bracketing a volatile put, in either
  // direction, returning NULL when the expected shape is absent

  MemBarNode *leading_to_normal(MemBarNode *leading);
  MemBarNode *normal_to_leading(const MemBarNode *barrier);
  MemBarNode *card_mark_to_trailing(const MemBarNode *barrier);
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing);
  MemBarNode *trailing_to_leading(const MemBarNode *trailing);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);
1062 %}
1063 
1064 source %{
1065 
1066   // Optimizaton of volatile gets and puts
1067   // -------------------------------------
1068   //
1069   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1070   // use to implement volatile reads and writes. For a volatile read
1071   // we simply need
1072   //
1073   //   ldar<x>
1074   //
1075   // and for a volatile write we need
1076   //
1077   //   stlr<x>
1078   // 
1079   // Alternatively, we can implement them by pairing a normal
1080   // load/store with a memory barrier. For a volatile read we need
1081   // 
1082   //   ldr<x>
1083   //   dmb ishld
1084   //
1085   // for a volatile write
1086   //
1087   //   dmb ish
1088   //   str<x>
1089   //   dmb ish
1090   //
1091   // In order to generate the desired instruction sequence we need to
1092   // be able to identify specific 'signature' ideal graph node
1093   // sequences which i) occur as a translation of a volatile reads or
1094   // writes and ii) do not occur through any other translation or
  // graph transformation. We can then provide alternative adlc
1096   // matching rules which translate these node sequences to the
1097   // desired machine code sequences. Selection of the alternative
1098   // rules can be implemented by predicates which identify the
1099   // relevant node sequences.
1100   //
1101   // The ideal graph generator translates a volatile read to the node
1102   // sequence
1103   //
1104   //   LoadX[mo_acquire]
1105   //   MemBarAcquire
1106   //
1107   // As a special case when using the compressed oops optimization we
1108   // may also see this variant
1109   //
1110   //   LoadN[mo_acquire]
1111   //   DecodeN
1112   //   MemBarAcquire
1113   //
1114   // A volatile write is translated to the node sequence
1115   //
1116   //   MemBarRelease
1117   //   StoreX[mo_release] {CardMark}-optional
1118   //   MemBarVolatile
1119   //
1120   // n.b. the above node patterns are generated with a strict
1121   // 'signature' configuration of input and output dependencies (see
1122   // the predicates below for exact details). The card mark may be as
1123   // simple as a few extra nodes or, in a few GC configurations, may
1124   // include more complex control flow between the leading and
1125   // trailing memory barriers. However, whatever the card mark
1126   // configuration these signatures are unique to translated volatile
1127   // reads/stores -- they will not appear as a result of any other
1128   // bytecode translation or inlining nor as a consequence of
1129   // optimizing transforms.
1130   //
1131   // We also want to catch inlined unsafe volatile gets and puts and
1132   // be able to implement them using either ldar<x>/stlr<x> or some
  // combination of ldr<x>/str<x> and dmb instructions.
1134   //
1135   // Inlined unsafe volatiles puts manifest as a minor variant of the
1136   // normal volatile put node sequence containing an extra cpuorder
1137   // membar
1138   //
1139   //   MemBarRelease
1140   //   MemBarCPUOrder
1141   //   StoreX[mo_release] {CardMark}-optional
1142   //   MemBarVolatile
1143   //
1144   // n.b. as an aside, the cpuorder membar is not itself subject to
1145   // matching and translation by adlc rules.  However, the rule
1146   // predicates need to detect its presence in order to correctly
1147   // select the desired adlc rules.
1148   //
1149   // Inlined unsafe volatile gets manifest as a somewhat different
1150   // node sequence to a normal volatile get
1151   //
1152   //   MemBarCPUOrder
1153   //        ||       \\
1154   //   MemBarAcquire LoadX[mo_acquire]
1155   //        ||
1156   //   MemBarCPUOrder
1157   //
1158   // In this case the acquire membar does not directly depend on the
1159   // load. However, we can be sure that the load is generated from an
1160   // inlined unsafe volatile get if we see it dependent on this unique
1161   // sequence of membar nodes. Similarly, given an acquire membar we
1162   // can know that it was added because of an inlined unsafe volatile
1163   // get if it is fed and feeds a cpuorder membar and if its feed
1164   // membar also feeds an acquiring load.
1165   //
1166   // So, where we can identify these volatile read and write
1167   // signatures we can choose to plant either of the above two code
1168   // sequences. For a volatile read we can simply plant a normal
1169   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1170   // also choose to inhibit translation of the MemBarAcquire and
1171   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1172   //
1173   // When we recognise a volatile store signature we can choose to
1174   // plant at a dmb ish as a translation for the MemBarRelease, a
1175   // normal str<x> and then a dmb ish for the MemBarVolatile.
1176   // Alternatively, we can inhibit translation of the MemBarRelease
1177   // and MemBarVolatile and instead plant a simple stlr<x>
1178   // instruction.
1179   //
1180   // Of course, the above only applies when we see these signature
1181   // configurations. We still want to plant dmb instructions in any
1182   // other cases where we may see a MemBarAcquire, MemBarRelease or
1183   // MemBarVolatile. For example, at the end of a constructor which
1184   // writes final/volatile fields we will see a MemBarRelease
1185   // instruction and this needs a 'dmb ish' lest we risk the
1186   // constructed object being visible without making the
1187   // final/volatile field writes visible.
1188   //
1189   // n.b. the translation rules below which rely on detection of the
1190   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1191   // If we see anything other than the signature configurations we
1192   // always just translate the loads and stores to ldr<x> and str<x>
1193   // and translate acquire, release and volatile membars to the
1194   // relevant dmb instructions.
1195   //
1196 
1197   // graph traversal helpers used for volatile put/get optimization
1198 
1199   // 1) general purpose helpers
1200 
1201   // if node n is linked to a parent MemBarNode by an intervening
1202   // Control and Memory ProjNode return the MemBarNode otherwise return
1203   // NULL.
1204   //
1205   // n may only be a Load or a MemBar.
1206 
1207   MemBarNode *parent_membar(const Node *n)
1208   {
1209     Node *ctl = NULL;
1210     Node *mem = NULL;
1211     Node *membar = NULL;
1212 
1213     if (n->is_Load()) {
1214       ctl = n->lookup(LoadNode::Control);
1215       mem = n->lookup(LoadNode::Memory);
1216     } else if (n->is_MemBar()) {
1217       ctl = n->lookup(TypeFunc::Control);
1218       mem = n->lookup(TypeFunc::Memory);
1219     } else {
1220         return NULL;
1221     }
1222 
1223     if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj())
1224       return NULL;
1225 
1226     membar = ctl->lookup(0);
1227 
1228     if (!membar || !membar->is_MemBar())
1229       return NULL;
1230 
1231     if (mem->lookup(0) != membar)
1232       return NULL;
1233 
1234     return membar->as_MemBar();
1235   }
1236 
1237   // if n is linked to a child MemBarNode by intervening Control and
1238   // Memory ProjNodes return the MemBarNode otherwise return NULL.
1239 
1240   MemBarNode *child_membar(const MemBarNode *n)
1241   {
1242     ProjNode *ctl = n->proj_out(TypeFunc::Control);
1243     ProjNode *mem = n->proj_out(TypeFunc::Memory);
1244 
1245     // MemBar needs to have both a Ctl and Mem projection
1246     if (! ctl || ! mem)
1247       return NULL;
1248 
1249     MemBarNode *child = NULL;
1250     Node *x;
1251 
1252     for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1253       x = ctl->fast_out(i);
1254       // if we see a membar we keep hold of it. we may also see a new
1255       // arena copy of the original but it will appear later
1256       if (x->is_MemBar()) {
1257           child = x->as_MemBar();
1258           break;
1259       }
1260     }
1261 
1262     if (child == NULL)
1263       return NULL;
1264 
1265     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1266       x = mem->fast_out(i);
1267       // if we see a membar we keep hold of it. we may also see a new
1268       // arena copy of the original but it will appear later
1269       if (x == child) {
1270         return child;
1271       }
1272     }
1273     return NULL;
1274   }
1275 
1276   // helper predicate use to filter candidates for a leading memory
1277   // barrier
1278   //
1279   // returns true if barrier is a MemBarRelease or a MemBarCPUOrder
1280   // whose Ctl and Mem feeds come from a MemBarRelease otherwise false
1281 
1282   bool leading_membar(const MemBarNode *barrier)
1283   {
1284     int opcode = barrier->Opcode();
1285     // if this is a release membar we are ok
1286     if (opcode == Op_MemBarRelease)
1287       return true;
1288     // if its a cpuorder membar . . .
1289     if (opcode != Op_MemBarCPUOrder)
1290       return false;
1291     // then the parent has to be a release membar
1292     MemBarNode *parent = parent_membar(barrier);
1293     if (!parent)
1294       return false;
1295     opcode = parent->Opcode();
1296     return opcode == Op_MemBarRelease;
1297   }
1298  
1299   // 2) card mark detection helper
1300 
1301   // helper predicate which can be used to detect a volatile membar
1302   // introduced as part of a conditional card mark sequence either by
1303   // G1 or by CMS when UseCondCardMark is true.
1304   //
1305   // membar can be definitively determined to be part of a card mark
1306   // sequence if and only if all the following hold
1307   //
1308   // i) it is a MemBarVolatile
1309   //
1310   // ii) either UseG1GC or (UseConcMarkSweepGC && UseCondCardMark) is
1311   // true
1312   //
1313   // iii) the node's Mem projection feeds a StoreCM node.
1314   
1315   bool is_card_mark_membar(const MemBarNode *barrier)
1316   {
1317     if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark))
1318       return false;
1319 
1320     if (barrier->Opcode() != Op_MemBarVolatile)
1321       return false;
1322 
1323     ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
1324 
1325     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax ; i++) {
1326       Node *y = mem->fast_out(i);
1327       if (y->Opcode() == Op_StoreCM) {
1328         return true;
1329       }
1330     }
1331   
1332     return false;
1333   }
1334 
1335 
1336   // 3) helper predicates to traverse volatile put graphs which may
1337   // contain GC barrier subgraphs
1338 
1339   // Preamble
1340   // --------
1341   //
1342   // for volatile writes we can omit generating barriers and employ a
  // releasing store when we see a node sequence with a
1344   // leading MemBarRelease and a trailing MemBarVolatile as follows
1345   //
1346   //   MemBarRelease
1347   //  {      ||      } -- optional
1348   //  {MemBarCPUOrder}
1349   //         ||     \\
1350   //         ||     StoreX[mo_release]
1351   //         | \     /
1352   //         | MergeMem
1353   //         | /
1354   //   MemBarVolatile
1355   //
1356   // where
1357   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1358   //  | \ and / indicate further routing of the Ctl and Mem feeds
1359   // 
1360   // this is the graph we see for non-object stores. however, for a
1361   // volatile Object store (StoreN/P) we may see other nodes below the
1362   // leading membar because of the need for a GC pre- or post-write
1363   // barrier.
1364   //
  // with most GC configurations we will see this simple variant which
1366   // includes a post-write barrier card mark.
1367   //
1368   //   MemBarRelease______________________________
1369   //         ||    \\               Ctl \        \\
1370   //         ||    StoreN/P[mo_release] CastP2X  StoreB/CM
1371   //         | \     /                       . . .  /
1372   //         | MergeMem
1373   //         | /
1374   //         ||      /
1375   //   MemBarVolatile
1376   //
1377   // i.e. the leading membar feeds Ctl to a CastP2X (which converts
1378   // the object address to an int used to compute the card offset) and
1379   // Ctl+Mem to a StoreB node (which does the actual card mark).
1380   //
1381   // n.b. a StoreCM node will only appear in this configuration when
1382   // using CMS. StoreCM differs from a normal card mark write (StoreB)
1383   // because it implies a requirement to order visibility of the card
1384   // mark (StoreCM) relative to the object put (StoreP/N) using a
1385   // StoreStore memory barrier (arguably this ought to be represented
1386   // explicitly in the ideal graph but that is not how it works). This
1387   // ordering is required for both non-volatile and volatile
1388   // puts. Normally that means we need to translate a StoreCM using
1389   // the sequence
1390   //
1391   //   dmb ishst
1392   //   stlrb
1393   //
1394   // However, in the case of a volatile put if we can recognise this
1395   // configuration and plant an stlr for the object write then we can
1396   // omit the dmb and just plant an strb since visibility of the stlr
1397   // is ordered before visibility of subsequent stores. StoreCM nodes
1398   // also arise when using G1 or using CMS with conditional card
1399   // marking. In these cases (as we shall see) we don't need to insert
1400   // the dmb when translating StoreCM because there is already an
1401   // intervening StoreLoad barrier between it and the StoreP/N.
1402   //
1403   // It is also possible to perform the card mark conditionally on it
1404   // currently being unmarked in which case the volatile put graph
1405   // will look slightly different
1406   //
1407   //   MemBarRelease
1408   //   MemBarCPUOrder___________________________________________
1409   //         ||    \\               Ctl \     Ctl \     \\  Mem \
1410   //         ||    StoreN/P[mo_release] CastP2X   If   LoadB     |
1411   //         | \     /                              \            |
1412   //         | MergeMem                            . . .      StoreB
1413   //         | /                                                /
1414   //         ||     /
1415   //   MemBarVolatile
1416   //
1417   // It is worth noting at this stage that both the above
1418   // configurations can be uniquely identified by checking that the
1419   // memory flow includes the following subgraph:
1420   //
1421   //   MemBarRelease
1422   //   MemBarCPUOrder
1423   //          |  \      . . .
1424   //          |  StoreX[mo_release]  . . .
1425   //          |   /
1426   //         MergeMem
1427   //          |
1428   //   MemBarVolatile
1429   //
1430   // This is referred to as a *normal* subgraph. It can easily be
1431   // detected starting from any candidate MemBarRelease,
1432   // StoreX[mo_release] or MemBarVolatile.
1433   //
1434   // the code below uses two helper predicates, leading_to_normal and
1435   // normal_to_leading to identify this configuration, one validating
1436   // the layout starting from the top membar and searching down and
1437   // the other validating the layout starting from the lower membar
1438   // and searching up.
1439   //
1440   // There are two special case GC configurations when a normal graph
1441   // may not be generated: when using G1 (which always employs a
1442   // conditional card mark); and when using CMS with conditional card
1443   // marking configured. These GCs are both concurrent rather than
1444   // stop-the world GCs. So they introduce extra Ctl+Mem flow into the
1445   // graph between the leading and trailing membar nodes, in
  // particular enforcing stronger memory serialisation between the
1447   // object put and the corresponding conditional card mark. CMS
1448   // employs a post-write GC barrier while G1 employs both a pre- and
1449   // post-write GC barrier. Of course the extra nodes may be absent --
1450   // they are only inserted for object puts. This significantly
1451   // complicates the task of identifying whether a MemBarRelease,
1452   // StoreX[mo_release] or MemBarVolatile forms part of a volatile put
1453   // when using these GC configurations (see below).
1454   //
1455   // In both cases the post-write subtree includes an auxiliary
1456   // MemBarVolatile (StoreLoad barrier) separating the object put and
1457   // the read of the corresponding card. This poses two additional
1458   // problems.
1459   //
1460   // Firstly, a card mark MemBarVolatile needs to be distinguished
1461   // from a normal trailing MemBarVolatile. Resolving this first
1462   // problem is straightforward: a card mark MemBarVolatile always
1463   // projects a Mem feed to a StoreCM node and that is a unique marker
1464   //
1465   //      MemBarVolatile (card mark)
1466   //       C |    \     . . .
1467   //         |   StoreCM   . . .
1468   //       . . .
1469   //
1470   // The second problem is how the code generator is to translate the
1471   // card mark barrier? It always needs to be translated to a "dmb
1472   // ish" instruction whether or not it occurs as part of a volatile
1473   // put. A StoreLoad barrier is needed after the object put to ensure
1474   // i) visibility to GC threads of the object put and ii) visibility
1475   // to the mutator thread of any card clearing write by a GC
1476   // thread. Clearly a normal store (str) will not guarantee this
1477   // ordering but neither will a releasing store (stlr). The latter
1478   // guarantees that the object put is visible but does not guarantee
1479   // that writes by other threads have also been observed.
1480   // 
1481   // So, returning to the task of translating the object put and the
1482   // leading/trailing membar nodes: what do the non-normal node graph
1483   // look like for these 2 special cases? and how can we determine the
1484   // status of a MemBarRelease, StoreX[mo_release] or MemBarVolatile
1485   // in both normal and non-normal cases?
1486   //
1487   // A CMS GC post-barrier wraps its card write (StoreCM) inside an If
  // which selects conditional execution based on the value loaded
1489   // (LoadB) from the card. Ctl and Mem are fed to the If via an
1490   // intervening StoreLoad barrier (MemBarVolatile).
1491   //
1492   // So, with CMS we may see a node graph which looks like this
1493   //
1494   //   MemBarRelease
1495   //   MemBarCPUOrder_(leading)__________________
1496   //     C |    M \       \\                   C \
1497   //       |       \    StoreN/P[mo_release]  CastP2X
1498   //       |    Bot \    /
1499   //       |       MergeMem
1500   //       |         /
1501   //      MemBarVolatile (card mark)
1502   //     C |  ||    M |
1503   //       | LoadB    |
1504   //       |   |      |
1505   //       | Cmp      |\
1506   //       | /        | \
1507   //       If         |  \
1508   //       | \        |   \
1509   // IfFalse  IfTrue  |    \
1510   //       \     / \  |     \
1511   //        \   / StoreCM    |
1512   //         \ /      |      |
1513   //        Region   . . .   |
1514   //          | \           /
1515   //          |  . . .  \  / Bot
1516   //          |       MergeMem
1517   //          |          |
1518   //        MemBarVolatile (trailing)
1519   //
1520   // The first MergeMem merges the AliasIdxBot Mem slice from the
1521   // leading membar and the oopptr Mem slice from the Store into the
1522   // card mark membar. The trailing MergeMem merges the AliasIdxBot
1523   // Mem slice from the card mark membar and the AliasIdxRaw slice
1524   // from the StoreCM into the trailing membar (n.b. the latter
1525   // proceeds via a Phi associated with the If region).
1526   //
1527   // G1 is quite a lot more complicated. The nodes inserted on behalf
1528   // of G1 may comprise: a pre-write graph which adds the old value to
1529   // the SATB queue; the releasing store itself; and, finally, a
1530   // post-write graph which performs a card mark.
1531   //
1532   // The pre-write graph may be omitted, but only when the put is
1533   // writing to a newly allocated (young gen) object and then only if
1534   // there is a direct memory chain to the Initialize node for the
1535   // object allocation. This will not happen for a volatile put since
1536   // any memory chain passes through the leading membar.
1537   //
1538   // The pre-write graph includes a series of 3 If tests. The outermost
1539   // If tests whether SATB is enabled (no else case). The next If tests
1540   // whether the old value is non-NULL (no else case). The third tests
1541   // whether the SATB queue index is > 0, if so updating the queue. The
1542   // else case for this third If calls out to the runtime to allocate a
1543   // new queue buffer.
1544   //
1545   // So with G1 the pre-write and releasing store subgraph looks like
1546   // this (the nested Ifs are omitted).
1547   //
1548   //  MemBarRelease (leading)____________
1549   //     C |  ||  M \   M \    M \  M \ . . .
1550   //       | LoadB   \  LoadL  LoadN   \
1551   //       | /        \                 \
1552   //       If         |\                 \
1553   //       | \        | \                 \
1554   //  IfFalse  IfTrue |  \                 \
1555   //       |     |    |   \                 |
1556   //       |     If   |   /\                |
1557   //       |     |          \               |
1558   //       |                 \              |
1559   //       |    . . .         \             |
1560   //       | /       | /       |            |
1561   //      Region  Phi[M]       |            |
1562   //       | \       |         |            |
1563   //       |  \_____ | ___     |            |
1564   //     C | C \     |   C \ M |            |
1565   //       | CastP2X | StoreN/P[mo_release] |
1566   //       |         |         |            |
1567   //     C |       M |       M |          M |
1568   //        \        |         |           /
1569   //                  . . . 
1570   //          (post write subtree elided)
1571   //                    . . .
1572   //             C \         M /
1573   //         MemBarVolatile (trailing)
1574   //
1575   // n.b. the LoadB in this subgraph is not the card read -- it's a
1576   // read of the SATB queue active flag.
1577   //
1578   // The G1 post-write subtree is also optional, this time when the
1579   // new value being written is either null or can be identified as a
1580   // newly allocated (young gen) object with no intervening control
1581   // flow. The latter cannot happen but the former may, in which case
1582   // the card mark membar is omitted and the memory feeds from the
1583   // leading membar and the StoreN/P are merged direct into the
1584   // trailing membar as per the normal subgraph. So, the only special
1585   // case which arises is when the post-write subgraph is generated.
1586   //
1587   // The kernel of the post-write G1 subgraph is the card mark itself
1588   // which includes a card mark memory barrier (MemBarVolatile), a
1589   // card test (LoadB), and a conditional update (If feeding a
1590   // StoreCM). These nodes are surrounded by a series of nested Ifs
1591   // which try to avoid doing the card mark. The top level If skips if
1592   // the object reference does not cross regions (i.e. it tests if
1593   // (adr ^ val) >> log2(regsize) != 0) -- intra-region references
1594   // need not be recorded. The next If, which skips on a NULL value,
1595   // may be absent (it is not generated if the type of value is >=
1596   // OopPtr::NotNull). The 3rd If skips writes to young regions (by
1597   // checking if card_val != young).  n.b. although this test requires
1598   // a pre-read of the card it can safely be done before the StoreLoad
1599   // barrier. However that does not bypass the need to reread the card
1600   // after the barrier.
1601   //
1602   //                (pre-write subtree elided)
1603   //        . . .                  . . .    . . .  . . .
1604   //        C |                    M |     M |    M |
1605   //       Region                  Phi[M] StoreN    |
1606   //          |                     / \      |      |
1607   //         / \_______            /   \     |      |
1608   //      C / C \      . . .            \    |      |
1609   //       If   CastP2X . . .            |   |      |
1610   //       / \                           |   |      |
1611   //      /   \                          |   |      |
1612   // IfFalse IfTrue                      |   |      |
1613   //   |       |                         |   |     /|
1614   //   |       If                        |   |    / |
1615   //   |      / \                        |   |   /  |
1616   //   |     /   \                        \  |  /   |
1617   //   | IfFalse IfTrue                   MergeMem  |
1618   //   |  . . .    / \                       /      |
1619   //   |          /   \                     /       |
1620   //   |     IfFalse IfTrue                /        |
1621   //   |      . . .    |                  /         |
1622   //   |               If                /          |
1623   //   |               / \              /           |
1624   //   |              /   \            /            |
1625   //   |         IfFalse IfTrue       /             |
1626   //   |           . . .   |         /              |
1627   //   |                    \       /               |
1628   //   |                     \     /                |
1629   //   |             MemBarVolatile__(card mark)    |
1630   //   |                ||   C |  M \  M \          |
1631   //   |               LoadB   If    |    |         |
1632   //   |                      / \    |    |         |
1633   //   |                     . . .   |    |         |
1634   //   |                          \  |    |        /
1635   //   |                        StoreCM   |       /
1636   //   |                          . . .   |      /
1637   //   |                        _________/      /
1638   //   |                       /  _____________/
1639   //   |   . . .       . . .  |  /            /
1640   //   |    |                 | /   _________/
1641   //   |    |               Phi[M] /        /
1642   //   |    |                 |   /        /
1643   //   |    |                 |  /        /
1644   //   |  Region  . . .     Phi[M]  _____/
1645   //   |    /                 |    /
1646   //   |                      |   /   
1647   //   | . . .   . . .        |  /
1648   //   | /                    | /
1649   // Region           |  |  Phi[M]
1650   //   |              |  |  / Bot
1651   //    \            MergeMem 
1652   //     \            /
1653   //     MemBarVolatile
1654   //
1655   // As with CMS the initial MergeMem merges the AliasIdxBot Mem slice
1656   // from the leading membar and the oopptr Mem slice from the Store
1657   // into the card mark membar i.e. the memory flow to the card mark
1658   // membar still looks like a normal graph.
1659   //
1660   // The trailing MergeMem merges an AliasIdxBot Mem slice with other
1661   // Mem slices (from the StoreCM and other card mark queue stores).
1662   // However in this case the AliasIdxBot Mem slice does not come
1663   // direct from the card mark membar. It is merged through a series
1664   // of Phi nodes. These are needed to merge the AliasIdxBot Mem flow
1665   // from the leading membar with the Mem feed from the card mark
1666   // membar. Each Phi corresponds to one of the Ifs which may skip
1667   // around the card mark membar. So when the If implementing the NULL
1668   // value check has been elided the total number of Phis is 2
1669   // otherwise it is 3.
1670   //
1671   // So, the upshot is that in all cases the volatile put graph will
  // include a *normal* memory subgraph between the leading membar and
1673   // its child membar. When that child is not a card mark membar then
1674   // it marks the end of a volatile put subgraph. If the child is a
1675   // card mark membar then the normal subgraph will form part of a
1676   // volatile put subgraph if and only if the child feeds an
1677   // AliasIdxBot Mem feed to a trailing barrier via a MergeMem. That
1678   // feed is either direct (for CMS) or via 2 or 3 Phi nodes merging
1679   // the leading barrier memory flow (for G1).
1680   // 
1681   // The predicates controlling generation of instructions for store
1682   // and barrier nodes employ a few simple helper functions (described
1683   // below) which identify the presence or absence of these subgraph
1684   // configurations and provide a means of traversing from one node in
1685   // the subgraph to another.
1686 
1687   // leading_to_normal
1688   //
  // graph traversal helper which detects the normal case Mem feed
1690   // from a release membar (or, optionally, its cpuorder child) to a
1691   // dependent volatile membar i.e. it ensures that the following Mem
1692   // flow subgraph is present.
1693   //
1694   //   MemBarRelease
1695   //   MemBarCPUOrder
1696   //          |  \      . . .
1697   //          |  StoreN/P[mo_release]  . . .
1698   //          |   /
1699   //         MergeMem
1700   //          |
1701   //   MemBarVolatile
1702   //
1703   // if the correct configuration is present returns the volatile
1704   // membar otherwise NULL.
1705   //
1706   // the input membar is expected to be either a cpuorder membar or a
1707   // release membar. in the latter case it should not have a cpu membar
1708   // child.
1709   //
1710   // the returned membar may be a card mark membar rather than a
1711   // trailing membar.
1712 
1713   MemBarNode *leading_to_normal(MemBarNode *leading)
1714   {
1715     assert((leading->Opcode() == Op_MemBarRelease ||
1716             leading->Opcode() == Op_MemBarCPUOrder),
1717            "expecting a volatile or cpuroder membar!");
1718 
1719     // check the mem flow
1720     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
1721 
1722     if (!mem)
1723       return NULL;
1724 
1725     Node *x = NULL;
1726     StoreNode * st = NULL;
1727     MergeMemNode *mm = NULL;
1728 
1729     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1730       x = mem->fast_out(i);
1731       if (x->is_MergeMem()) {
1732         if (mm != NULL)
1733           return NULL;
1734         // two merge mems is one too many
1735         mm = x->as_MergeMem();
1736       } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
1737         // two releasing stores is one too many
1738         if (st != NULL)
1739           return NULL;
1740         st = x->as_Store();
1741       }
1742     }
1743 
1744     if (!mm || !st)
1745       return NULL;
1746 
1747     bool found = false;
1748     // ensure the store feeds the merge
1749     for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
1750       if (st->fast_out(i) == mm) {
1751         found = true;
1752         break;
1753       }
1754     }
1755 
1756     if (!found)
1757       return NULL;
1758 
1759     MemBarNode *mbvol = NULL;
1760     // ensure the merge feeds a volatile membar
1761     for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
1762       x = mm->fast_out(i);
1763       if (x->is_MemBar() && x->Opcode() == Op_MemBarVolatile) {
1764         mbvol = x->as_MemBar();
1765         break;
1766       }
1767     }
1768 
1769     return mbvol;
1770   }
1771 
1772   // normal_to_leading
1773   //
1774   // graph traversal helper which detects the normal case Mem feed
1775   // from either a card mark or a trailing membar to a preceding
1776   // release membar (optionally its cpuorder child) i.e. it ensures
1777   // that the following Mem flow subgraph is present.
1778   //
1779   //   MemBarRelease
1780   //   MemBarCPUOrder {leading}
1781   //          |  \      . . .
1782   //          |  StoreN/P[mo_release]  . . .
1783   //          |   /
1784   //         MergeMem
1785   //          |
1786   //   MemBarVolatile
1787   //
1788   // this predicate checks for the same flow as the previous predicate
1789   // but starting from the bottom rather than the top.
1790   //
  // if the configuration is present returns the cpuorder membar for
1792   // preference or when absent the release membar otherwise NULL.
1793   //
1794   // n.b. the input membar is expected to be a MemBarVolatile but
1795   // need not be a card mark membar.
1796 
  MemBarNode *normal_to_leading(const MemBarNode *barrier)
  {
    // input must be a volatile membar
    assert(barrier->Opcode() == Op_MemBarVolatile, "expecting a volatile membar");
    Node *x;

    // the Mem feed to the membar should be a merge
    x = barrier->in(TypeFunc::Memory);
    if (!x->is_MergeMem())
      return NULL;

    MergeMemNode *mm = x->as_MergeMem();

    // the AliasIdxBot slice should be another MemBar projection
    x = mm->in(Compile::AliasIdxBot);
    // ensure this is a non control projection
    if (!x->is_Proj() || x->is_CFG())
      return NULL;
    // if it is fed by a membar that's the one we want
    x = x->in(0);

    if (!x->is_MemBar())
      return NULL;

    MemBarNode *leading = x->as_MemBar();
    // reject invalid candidates
    if (!leading_membar(leading))
      return NULL;

    // ok, we have a leading ReleaseMembar, now for the sanity clauses

    // the leading membar must feed Mem to a releasing store
    // n.b. StoreCM nodes are also marked release so they are filtered
    // out explicitly here
    ProjNode *mem = leading->proj_out(TypeFunc::Memory);
    StoreNode *st = NULL;
    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
        st = x->as_Store();
        break;
      }
    }
    if (st == NULL)
      return NULL;

    // the releasing store has to feed the same merge
    for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
      if (st->fast_out(i) == mm)
        return leading;
    }

    // the store feeds some other merge so this is not the normal
    // volatile put configuration
    return NULL;
  }
1849 
1850   // card_mark_to_trailing
1851   //
1852   // graph traversal helper which detects extra, non-normal Mem feed
1853   // from a card mark volatile membar to a trailing membar i.e. it
1854   // ensures that one of the following three GC post-write Mem flow
1855   // subgraphs is present.
1856   //
1857   // 1)
1858   //     . . .
1859   //       |
1860   //   MemBarVolatile (card mark)
1861   //      |          |     
1862   //      |        StoreCM
1863   //      |          |
1864   //      |        . . .
1865   //  Bot |  / 
1866   //   MergeMem 
1867   //      |
1868   //   MemBarVolatile (trailing)
1869   //
1870   //
1871   // 2)
1872   //   MemBarRelease/CPUOrder (leading)
1873   //    |
1874   //    | 
1875   //    |\       . . .
1876   //    | \        | 
1877   //    |  \  MemBarVolatile (card mark) 
1878   //    |   \   |     |
1879   //     \   \  |   StoreCM    . . .
1880   //      \   \ |
1881   //       \  Phi
1882   //        \ /
1883   //        Phi  . . .
1884   //     Bot |   /
1885   //       MergeMem
1886   //         |
1887   //   MemBarVolatile (trailing)
1888   //
1889   // 3)
1890   //   MemBarRelease/CPUOrder (leading)
1891   //    |
1892   //    |\
1893   //    | \
1894   //    |  \      . . .
1895   //    |   \       |
1896   //    |\   \  MemBarVolatile (card mark)
1897   //    | \   \   |     |
1898   //    |  \   \  |   StoreCM    . . .
1899   //    |   \   \ |
1900   //     \   \  Phi
1901   //      \   \ /  
1902   //       \  Phi
1903   //        \ /
1904   //        Phi  . . .
1905   //     Bot |   /
1906   //       MergeMem
1907   //         |
1908   //   MemBarVolatile (trailing)
1909   //
1910   // configuration 1 is only valid if UseConcMarkSweepGC &&
1911   // UseCondCardMark
1912   //
1913   // configurations 2 and 3 are only valid if UseG1GC.
1914   //
1915   // if a valid configuration is present returns the trailing membar
1916   // otherwise NULL.
1917   //
1918   // n.b. the supplied membar is expected to be a card mark
1919   // MemBarVolatile i.e. the caller must ensure the input node has the
1920   // correct operand and feeds Mem to a StoreCM node
1921 
1922   MemBarNode *card_mark_to_trailing(const MemBarNode *barrier)
1923   {
1924     // input must be a card mark volatile membar
1925     assert(is_card_mark_membar(barrier), "expecting a card mark membar");
1926 
1927     Node *feed = barrier->proj_out(TypeFunc::Memory);
1928     Node *x;
1929     MergeMemNode *mm = NULL;
1930 
1931     const int MAX_PHIS = 3;     // max phis we will search through
1932     int phicount = 0;           // current search count
1933 
1934     bool retry_feed = true;
1935     while (retry_feed) {
1936       // see if we have a direct MergeMem feed
1937       for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
1938         x = feed->fast_out(i);
1939         // the correct Phi will be merging a Bot memory slice
1940         if (x->is_MergeMem()) {
1941           mm = x->as_MergeMem();
1942           break;
1943         }
1944       }
1945       if (mm) {
1946         retry_feed = false;
1947       } else if (UseG1GC & phicount++ < MAX_PHIS) {
1948         // the barrier may feed indirectly via one or two Phi nodes
1949         PhiNode *phi = NULL;
1950         for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
1951           x = feed->fast_out(i);
1952           // the correct Phi will be merging a Bot memory slice
1953           if (x->is_Phi() && x->adr_type() == TypePtr::BOTTOM) {
1954             phi = x->as_Phi();
1955             break;
1956           }
1957         }
1958         if (!phi)
1959           return NULL;
1960         // look for another merge below this phi
1961         feed = phi;
1962       } else {
1963         // couldn't find a merge
1964         return NULL;
1965       }
1966     }
1967 
1968     // sanity check this feed turns up as the expected slice
1969     assert(mm->as_MergeMem()->in(Compile::AliasIdxBot) == feed, "expecting membar to feed AliasIdxBot slice to Merge");
1970 
1971     MemBarNode *trailing = NULL;
1972     // be sure we have a volatile membar below the merge
1973     for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
1974       x = mm->fast_out(i);
1975       if (x->is_MemBar() && x->Opcode() == Op_MemBarVolatile) {
1976         trailing = x->as_MemBar();
1977         break;
1978       }
1979     }
1980 
1981     return trailing;
1982   }
1983 
1984   // trailing_to_card_mark
1985   //
1986   // graph traversal helper which detects extra, non-normal Mem feed
1987   // from a trailing membar to a preceding card mark volatile membar
1988   // i.e. it identifies whether one of the three possible extra GC
1989   // post-write Mem flow subgraphs is present
1990   //
1991   // this predicate checks for the same flow as the previous predicate
1992   // but starting from the bottom rather than the top.
1993   //
  // if the configuration is present returns the card mark membar
1995   // otherwise NULL
1996 
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing)
  {
    assert(!is_card_mark_membar(trailing), "not expecting a card mark membar");

    Node *x = trailing->in(TypeFunc::Memory);
    // the Mem feed to the membar should be a merge
    if (!x->is_MergeMem())
      return NULL;

    MergeMemNode *mm = x->as_MergeMem();

    x = mm->in(Compile::AliasIdxBot);
    // with G1 we may possibly see a Phi or two before we see a Memory
    // Proj from the card mark membar

    const int MAX_PHIS = 3;     // max phis we will search through
    int phicount = 0;           // current search count

    // when the Bot slice is already a Proj we can skip the Phi walk
    bool retry_feed = !x->is_Proj();

    while (retry_feed) {
      if (UseG1GC && x->is_Phi() && phicount++ < MAX_PHIS) {
        PhiNode *phi = x->as_Phi();
        ProjNode *proj = NULL;
        PhiNode *nextphi = NULL;
        bool found_leading = false;
        // scan the phi's inputs looking for a Mem projection from a
        // volatile membar, a further phi to recurse into and/or a
        // projection from a leading membar
        for (uint i = 1; i < phi->req(); i++) {
          x = phi->in(i);
          if (x->is_Phi()) {
            nextphi = x->as_Phi();
          } else if (x->is_Proj()) {
            int opcode = x->in(0)->Opcode();
            if (opcode == Op_MemBarVolatile) {
              proj = x->as_Proj();
            } else if (opcode == Op_MemBarRelease ||
                       opcode == Op_MemBarCPUOrder) {
              // probably a leading membar
              found_leading = true;
            }
          }
        }
        // if we found a correct looking proj then retry from there
        // otherwise we must see a leading and a phi or this is the
        // wrong config
        if (proj != NULL) {
          x = proj;
          retry_feed = false;
        } else if (found_leading && nextphi != NULL) {
          // retry from this phi to check phi2
          x = nextphi;
        } else {
          // not what we were looking for
          return NULL;
        }
      } else {
        return NULL;
      }
    }
    // the proj has to come from the card mark membar
    x = x->in(0);
    if (!x->is_MemBar())
      return NULL;

    MemBarNode *card_mark_membar = x->as_MemBar();

    if (!is_card_mark_membar(card_mark_membar))
      return NULL;

    return card_mark_membar;
  }
2067 
2068   // trailing_to_leading
2069   //
2070   // graph traversal helper which checks the Mem flow up the graph
2071   // from a (non-card mark) volatile membar attempting to locate and
2072   // return an associated leading membar. it first looks for a
2073   // subgraph in the normal configuration (relying on helper
2074   // normal_to_leading). failing that it then looks for one of the
2075   // possible post-write card mark subgraphs linking the trailing node
  // to the card mark membar (relying on helper
2077   // trailing_to_card_mark), and then checks that the card mark membar
2078   // is fed by a leading membar (once again relying on auxiliary
2079   // predicate normal_to_leading).
2080   //
  // if the configuration is valid returns the cpuorder membar for
2082   // preference or when absent the release membar otherwise NULL.
2083   //
2084   // n.b. the input membar is expected to be a volatile membar but
2085   // must *not* be a card mark membar.
2086 
2087   MemBarNode *trailing_to_leading(const MemBarNode *trailing)
2088   {
2089     assert(!is_card_mark_membar(trailing), "not expecting a card mark membar");
2090 
2091     MemBarNode *leading = normal_to_leading(trailing);
2092 
2093     if (leading)
2094       return leading;
2095 
2096     MemBarNode *card_mark_membar = trailing_to_card_mark(trailing);
2097 
2098     if (!card_mark_membar)
2099       return NULL;
2100 
2101     return normal_to_leading(card_mark_membar);
2102   }
2103 
2104   // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
2105 
// returns true when the dmb for this acquire membar can be elided
// because the associated load will be implemented with ldar i.e. when
// one of the two acquire graph shapes described below is matched
bool unnecessary_acquire(const Node *barrier)
{
  // assert barrier->is_MemBar();
  if (UseBarriersForVolatile)
    // we need to plant a dmb
    return false;

  // a volatile read derived from bytecode (or also from an inlined
  // SHA field read via LibraryCallKit::load_field_from_object)
  // manifests as a LoadX[mo_acquire] followed by an acquire membar
  // with a bogus read dependency on its preceding load. so in those
  // cases we will find the load node at the PARMS offset of the
  // acquire membar.  n.b. there may be an intervening DecodeN node.
  //
  // a volatile load derived from an inlined unsafe field access
  // manifests as a cpuorder membar with Ctl and Mem projections
  // feeding both an acquire membar and a LoadX[mo_acquire]. The
  // acquire then feeds another cpuorder membar via Ctl and Mem
  // projections. The load has no output dependency on these trailing
  // membars because subsequent nodes inserted into the graph take
  // their control feed from the final membar cpuorder meaning they
  // are all ordered after the load.

  Node *x = barrier->lookup(TypeFunc::Parms);
  if (x) {
    // we are starting from an acquire and it has a fake dependency
    //
    // need to check for
    //
    //   LoadX[mo_acquire]
    //   {  |1   }
    //   {DecodeN}
    //      |Parms
    //   MemBarAcquire*
    //
    // where * tags node we were passed
    // and |k means input k
    if (x->is_DecodeNarrowPtr())
      x = x->in(1);

    return (x->is_Load() && x->as_Load()->is_acquire());
  }

  // now check for an unsafe volatile get

  // need to check for
  //
  //   MemBarCPUOrder
  //        ||       \\
  //   MemBarAcquire* LoadX[mo_acquire]
  //        ||
  //   MemBarCPUOrder
  //
  // where * tags node we were passed
  // and || or \\ are Ctl+Mem feeds via intermediate Proj Nodes

  // check for a parent MemBarCPUOrder
  ProjNode *ctl;
  ProjNode *mem;
  MemBarNode *parent = parent_membar(barrier);
  if (!parent || parent->Opcode() != Op_MemBarCPUOrder)
    return false;
  ctl = parent->proj_out(TypeFunc::Control);
  mem = parent->proj_out(TypeFunc::Memory);
  if (!ctl || !mem)
    return false;
  // ensure the proj nodes both feed a LoadX[mo_acquire]
  LoadNode *ld = NULL;
  for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
    x = ctl->fast_out(i);
    // if we see a load we keep hold of it and stop searching
    if (x->is_Load()) {
      ld = x->as_Load();
      break;
    }
  }
  // it must be an acquiring load
  if (! ld || ! ld->is_acquire())
    return false;
  for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
    x = mem->fast_out(i);
    // if we see the same load we drop it and stop searching
    if (x == ld) {
      ld = NULL;
      break;
    }
  }
  // we must have dropped the load i.e. the load must be fed Mem as
  // well as Ctl by the parent membar
  if (ld)
    return false;
  // check for a child cpuorder membar
  MemBarNode *child  = child_membar(barrier->as_MemBar());
  if (!child || child->Opcode() != Op_MemBarCPUOrder)
    return false;

  return true;
}
2203 
2204 bool needs_acquiring_load(const Node *n)
2205 {
2206   // assert n->is_Load();
2207   if (UseBarriersForVolatile)
2208     // we use a normal load and a dmb
2209     return false;
2210 
2211   LoadNode *ld = n->as_Load();
2212 
2213   if (!ld->is_acquire())
2214     return false;
2215 
2216   // check if this load is feeding an acquire membar
2217   //
2218   //   LoadX[mo_acquire]
2219   //   {  |1   }
2220   //   {DecodeN}
2221   //      |Parms
2222   //   MemBarAcquire*
2223   //
2224   // where * tags node we were passed
2225   // and |k means input k
2226 
2227   Node *start = ld;
2228   Node *mbacq = NULL;
2229 
2230   // if we hit a DecodeNarrowPtr we reset the start node and restart
2231   // the search through the outputs
2232  restart:
2233 
2234   for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
2235     Node *x = start->fast_out(i);
2236     if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
2237       mbacq = x;
2238     } else if (!mbacq &&
2239                (x->is_DecodeNarrowPtr() ||
2240                 (x->is_Mach() && x->Opcode() == Op_DecodeN))) {
2241       start = x;
2242       goto restart;
2243     }
2244   }
2245 
2246   if (mbacq) {
2247     return true;
2248   }
2249 
2250   // now check for an unsafe volatile get
2251 
2252   // check if Ctl and Proj feed comes from a MemBarCPUOrder
2253   //
2254   //     MemBarCPUOrder
2255   //        ||       \\
2256   //   MemBarAcquire* LoadX[mo_acquire]
2257   //        ||
2258   //   MemBarCPUOrder
2259 
2260   MemBarNode *membar;
2261 
2262   membar = parent_membar(ld);
2263 
2264   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder)
2265     return false;
2266 
2267   // ensure that there is a CPUOrder->Acquire->CPUOrder membar chain
2268 
2269   membar = child_membar(membar);
2270 
2271   if (!membar || !membar->Opcode() == Op_MemBarAcquire)
2272     return false;
2273 
2274   membar = child_membar(membar);
2275   
2276   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder)
2277     return false;
2278 
2279   return true;
2280 }
2281 
2282 bool unnecessary_release(const Node *n)
2283 {
2284   assert((n->is_MemBar() &&
2285           n->Opcode() == Op_MemBarRelease),
2286          "expecting a release membar");
2287 
2288   if (UseBarriersForVolatile)
2289     // we need to plant a dmb
2290     return false;
2291 
2292   // if there is a dependent CPUOrder barrier then use that as the
2293   // leading
2294 
2295   MemBarNode *barrier = n->as_MemBar();
2296   // check for an intervening cpuorder membar
2297   MemBarNode *b = child_membar(barrier);
2298   if (b && b->Opcode() == Op_MemBarCPUOrder) {
2299     // ok, so start the check from the dependent cpuorder barrier
2300     barrier = b;
2301   }
2302 
2303   // must start with a normal feed
2304   MemBarNode *child_barrier = leading_to_normal(barrier);
2305 
2306   if (!child_barrier)
2307     return false;
2308 
2309   if (!is_card_mark_membar(child_barrier))
2310     // this is the trailing membar and we are done
2311     return true;
2312 
2313   // must be sure this card mark feeds a trailing membar
2314   MemBarNode *trailing = card_mark_to_trailing(child_barrier);
2315   return (trailing != NULL);
2316 }
2317 
2318 bool unnecessary_volatile(const Node *n)
2319 {
2320   // assert n->is_MemBar();
2321   if (UseBarriersForVolatile)
2322     // we need to plant a dmb
2323     return false;
2324 
2325   MemBarNode *mbvol = n->as_MemBar();
2326 
2327   // first we check if this is part of a card mark. if so then we have
2328   // to generate a StoreLoad barrier
2329   
2330   if (is_card_mark_membar(mbvol))
2331       return false;
2332 
2333   // ok, if it's not a card mark then we still need to check if it is
2334   // a trailing membar of a volatile put hgraph.
2335 
2336   return (trailing_to_leading(mbvol) != NULL);
2337 }
2338 
2339 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
2340 
2341 bool needs_releasing_store(const Node *n)
2342 {
2343   // assert n->is_Store();
2344   if (UseBarriersForVolatile)
2345     // we use a normal store and dmb combination
2346     return false;
2347 
2348   StoreNode *st = n->as_Store();
2349 
2350   // the store must be marked as releasing
2351   if (!st->is_release())
2352     return false;
2353 
2354   // the store must be fed by a membar
2355 
2356   Node *x = st->lookup(StoreNode::Memory);
2357 
2358   if (! x || !x->is_Proj())
2359     return false;
2360 
2361   ProjNode *proj = x->as_Proj();
2362 
2363   x = proj->lookup(0);
2364 
2365   if (!x || !x->is_MemBar())
2366     return false;
2367 
2368   MemBarNode *barrier = x->as_MemBar();
2369 
2370   // if the barrier is a release membar or a cpuorder mmebar fed by a
2371   // release membar then we need to check whether that forms part of a
2372   // volatile put graph.
2373 
2374   // reject invalid candidates
2375   if (!leading_membar(barrier))
2376     return false;
2377 
2378   // does this lead a normal subgraph?
2379   MemBarNode *mbvol = leading_to_normal(barrier);
2380 
2381   if (!mbvol)
2382     return false;
2383 
2384   // all done unless this is a card mark
2385   if (!is_card_mark_membar(mbvol))
2386     return true;
2387   
2388   // we found a card mark -- just make sure we have a trailing barrier
2389 
2390   return (card_mark_to_trailing(mbvol) != NULL);
2391 }
2392 
2393 // predicate controlling translation of StoreCM
2394 //
2395 // returns true if a StoreStore must precede the card write otherwise
2396 // false
2397 
2398 bool unnecessary_storestore(const Node *storecm)
2399 {
2400   assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");
2401 
2402   // we only ever need to generate a dmb ishst between an object put
2403   // and the associated card mark when we are using CMS without
2404   // conditional card marking
2405 
2406   if (!UseConcMarkSweepGC || UseCondCardMark)
2407     return true;
2408 
2409   // if we are implementing volatile puts using barriers then the
2410   // object put as an str so we must insert the dmb ishst
2411 
2412   if (UseBarriersForVolatile)
2413     return false;
2414 
2415   // we can omit the dmb ishst if this StoreCM is part of a volatile
2416   // put because in thta case the put will be implemented by stlr
2417   //
2418   // we need to check for a normal subgraph feeding this StoreCM.
2419   // that means the StoreCM must be fed Memory from a leading membar,
2420   // either a MemBarRelease or its dependent MemBarCPUOrder, and the
2421   // leading membar must be part of a normal subgraph
2422 
2423   Node *x = storecm->in(StoreNode::Memory);
2424 
2425   if (!x->is_Proj())
2426     return false;
2427 
2428   x = x->in(0);
2429 
2430   if (!x->is_MemBar())
2431     return false;
2432 
2433   MemBarNode *leading = x->as_MemBar();
2434 
2435   // reject invalid candidates
2436   if (!leading_membar(leading))
2437     return false;
2438 
2439   // we can omit the StoreStore if it is the head of a normal subgraph
2440   return (leading_to_normal(leading) != NULL);
2441 }
2442 
2443 
2444 #define __ _masm.
2445 
2446 // advance declarations for helper functions to convert register
2447 // indices to register objects
2448 
2449 // the ad file has to provide implementations of certain methods
2450 // expected by the generic code
2451 //
2452 // REQUIRED FUNCTIONALITY
2453 
2454 //=============================================================================
2455 
2456 // !!!!! Special hack to get all types of calls to specify the byte offset
2457 //       from the start of the call to the point where the return address
2458 //       will point.
2459 
2460 int MachCallStaticJavaNode::ret_addr_offset()
2461 {
2462   // call should be a simple bl
2463   int off = 4;
2464   return off;
2465 }
2466 
int MachCallDynamicJavaNode::ret_addr_offset()
{
  // a dynamic call is emitted as four 4-byte instructions, so the
  // return address lies 16 bytes past the start of the call
  return 16; // movz, movk, movk, bl
}
2471 
2472 int MachCallRuntimeNode::ret_addr_offset() {
2473   // for generated stubs the call will be
2474   //   far_call(addr)
2475   // for real runtime callouts it will be six instructions
2476   // see aarch64_enc_java_to_runtime
2477   //   adr(rscratch2, retaddr)
2478   //   lea(rscratch1, RuntimeAddress(addr)
2479   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
2480   //   blrt rscratch1
2481   CodeBlob *cb = CodeCache::find_blob(_entry_point);
2482   if (cb) {
2483     return MacroAssembler::far_branch_size();
2484   } else {
2485     return 6 * NativeInstruction::instruction_size;
2486   }
2487 }
2488 
2489 // Indicate if the safepoint node needs the polling page as an input
2490 
2491 // the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
2493 // instruction itself. so we cannot plant a mov of the safepoint poll
2494 // address followed by a load. setting this to true means the mov is
2495 // scheduled as a prior instruction. that's better for scheduling
2496 // anyway.
2497 
bool SafePointNode::needs_polling_address_input()
{
  // true means the safepoint poll page address is supplied as an
  // input, so the mov materializing it can be scheduled ahead of the
  // poll load (see the explanatory comment above)
  return true;
}
2502 
2503 //=============================================================================
2504 
2505 #ifndef PRODUCT
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  // debug listing entry for a breakpoint node
  st->print("BREAKPOINT");
}
2509 #endif
2510 
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  // emit a brk #0 to trap into the debugger
  __ brk(0);
}
2515 
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  // defer to the generic size computation for the emitted code
  return MachNode::size(ra_);
}
2519 
2520 //=============================================================================
2521 
2522 #ifndef PRODUCT
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    // debug listing entry showing the amount of nop padding
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
2526 #endif
2527 
2528   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
2529     MacroAssembler _masm(&cbuf);
2530     for (int i = 0; i < _count; i++) {
2531       __ nop();
2532     }
2533   }
2534 
  uint MachNopNode::size(PhaseRegAlloc*) const {
    // each nop occupies one instruction slot
    return _count * NativeInstruction::instruction_size;
  }
2538 
2539 //=============================================================================
2540 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
2541 
int Compile::ConstantTable::calculate_table_base_offset() const {
  // the constant table is addressed absolutely so no base offset is
  // needed
  return 0;  // absolute addressing, no offset
}
2545 
2546 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  // never called since requires_postalloc_expand() returns false
  ShouldNotReachHere();
}
2550 
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding: no code is emitted for the constant base
}
2554 
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // empty encoding so zero bytes are emitted
  return 0;
}
2558 
2559 #ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  // debug listing entry for the (empty) constant base node
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
2563 #endif
2564 
2565 #ifndef PRODUCT
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  // debug listing of the method prolog
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // for a small frame: drop sp in one step then store the frame
  // record at the top; for a large frame: push the frame record first
  // then drop sp by an amount loaded into a scratch register
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
2585 #endif
2586 
// Emit the method prolog: patchable nop, optional stack-bang overflow
// check, frame construction, and (on the simulator) an entry notification.
// Instruction order here is significant — the leading nop must be the
// first instruction so it can later be overwritten with a branch.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  // Bang the stack if the (worst-case deopt-inflated) frame is large
  // enough to possibly skip the guard pages.
  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  // Record the offset at which the frame is fully built; needed for
  // correct stack walking during the prolog.
  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
2622 
// Variable-sized (stack bang, frame shape, simulator notify all vary),
// so measure by emitting into a scratch buffer via MachNode::size.
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}
2628 
// Number of relocation entries the prolog contributes: none.
int MachPrologNode::reloc() const
{
  return 0;
}
2633 
2634 //=============================================================================
2635 
2636 #ifndef PRODUCT
// Debug printout of the epilog.  Mirrors MacroAssembler::remove_frame
// (three shapes keyed on frame size) plus the safepoint polling-page
// touch emitted for method returns.  Keep in sync with
// MachEpilogNode::emit / remove_frame.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  // Safepoint poll on return, only for complete method compilations
  // (stubs and intrinsics don't poll).
  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
2660 #endif
2661 
// Emit the method epilog: tear down the frame, notify the simulator of
// re-entry if applicable, and touch the safepoint polling page with a
// poll_return relocation so the VM can stop this thread at the return.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  if (do_polling() && C->is_method_compilation()) {
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
2677 
// Epilog size varies with frame shape and polling; measure it.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}
2682 
// The polling-page read carries one relocation entry.
int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}
2687 
// Use the generic pipeline description for the epilog.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
2691 
2692 // This method seems to be obsolete. It is declared in machnode.hpp
2693 // and defined in all *.ad files, but it is never called. Should we
2694 // get rid of it?
// Offset of the safepoint poll within the epilog.  NOTE(review): the
// comment above says this is never called; the value 4 (one instruction)
// is kept for interface completeness only.
int MachEpilogNode::safepoint_offset() const {
  assert(do_polling(), "no return for this epilog node");
  return 4;
}
2699 
2700 //=============================================================================
2701 
2702 // Figure out which register class each belongs in: rc_int, rc_float or
2703 // rc_stack.
// Register classes a spill-copy endpoint can belong to.
enum RC { rc_bad, rc_int, rc_float, rc_stack };
2705 
2706 static enum RC rc_class(OptoReg::Name reg) {
2707 
2708   if (reg == OptoReg::Bad) {
2709     return rc_bad;
2710   }
2711 
2712   // we have 30 int registers * 2 halves
2713   // (rscratch1 and rscratch2 are omitted)
2714 
2715   if (reg < 60) {
2716     return rc_int;
2717   }
2718 
2719   // we have 32 float register * 2 halves
2720   if (reg < 60 + 128) {
2721     return rc_float;
2722   }
2723 
2724   // Between float regs & stack is the flags regs.
2725   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
2726 
2727   return rc_stack;
2728 }
2729 
2730 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
2731   Compile* C = ra_->C;
2732 
2733   // Get registers to move.
2734   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
2735   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
2736   OptoReg::Name dst_hi = ra_->get_reg_second(this);
2737   OptoReg::Name dst_lo = ra_->get_reg_first(this);
2738 
2739   enum RC src_hi_rc = rc_class(src_hi);
2740   enum RC src_lo_rc = rc_class(src_lo);
2741   enum RC dst_hi_rc = rc_class(dst_hi);
2742   enum RC dst_lo_rc = rc_class(dst_lo);
2743 
2744   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
2745 
2746   if (src_hi != OptoReg::Bad) {
2747     assert((src_lo&1)==0 && src_lo+1==src_hi &&
2748            (dst_lo&1)==0 && dst_lo+1==dst_hi,
2749            "expected aligned-adjacent pairs");
2750   }
2751 
2752   if (src_lo == dst_lo && src_hi == dst_hi) {
2753     return 0;            // Self copy, no move.
2754   }
2755 
2756   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
2757               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
2758   int src_offset = ra_->reg2offset(src_lo);
2759   int dst_offset = ra_->reg2offset(dst_lo);
2760 
2761   if (bottom_type()->isa_vect() != NULL) {
2762     uint ireg = ideal_reg();
2763     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
2764     if (cbuf) {
2765       MacroAssembler _masm(cbuf);
2766       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
2767       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
2768         // stack->stack
2769         assert((src_offset & 7) && (dst_offset & 7), "unaligned stack offset");
2770         if (ireg == Op_VecD) {
2771           __ unspill(rscratch1, true, src_offset);
2772           __ spill(rscratch1, true, dst_offset);
2773         } else {
2774           __ spill_copy128(src_offset, dst_offset);
2775         }
2776       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
2777         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2778                ireg == Op_VecD ? __ T8B : __ T16B,
2779                as_FloatRegister(Matcher::_regEncode[src_lo]));
2780       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
2781         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
2782                        ireg == Op_VecD ? __ D : __ Q,
2783                        ra_->reg2offset(dst_lo));
2784       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
2785         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2786                        ireg == Op_VecD ? __ D : __ Q,
2787                        ra_->reg2offset(src_lo));
2788       } else {
2789         ShouldNotReachHere();
2790       }
2791     }
2792   } else if (cbuf) {
2793     MacroAssembler _masm(cbuf);
2794     switch (src_lo_rc) {
2795     case rc_int:
2796       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
2797         if (is64) {
2798             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
2799                    as_Register(Matcher::_regEncode[src_lo]));
2800         } else {
2801             MacroAssembler _masm(cbuf);
2802             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
2803                     as_Register(Matcher::_regEncode[src_lo]));
2804         }
2805       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
2806         if (is64) {
2807             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2808                      as_Register(Matcher::_regEncode[src_lo]));
2809         } else {
2810             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2811                      as_Register(Matcher::_regEncode[src_lo]));
2812         }
2813       } else {                    // gpr --> stack spill
2814         assert(dst_lo_rc == rc_stack, "spill to bad register class");
2815         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
2816       }
2817       break;
2818     case rc_float:
2819       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
2820         if (is64) {
2821             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
2822                      as_FloatRegister(Matcher::_regEncode[src_lo]));
2823         } else {
2824             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
2825                      as_FloatRegister(Matcher::_regEncode[src_lo]));
2826         }
2827       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
2828           if (cbuf) {
2829             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2830                      as_FloatRegister(Matcher::_regEncode[src_lo]));
2831         } else {
2832             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2833                      as_FloatRegister(Matcher::_regEncode[src_lo]));
2834         }
2835       } else {                    // fpr --> stack spill
2836         assert(dst_lo_rc == rc_stack, "spill to bad register class");
2837         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
2838                  is64 ? __ D : __ S, dst_offset);
2839       }
2840       break;
2841     case rc_stack:
2842       if (dst_lo_rc == rc_int) {  // stack --> gpr load
2843         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
2844       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
2845         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2846                    is64 ? __ D : __ S, src_offset);
2847       } else {                    // stack --> stack copy
2848         assert(dst_lo_rc == rc_stack, "spill to bad register class");
2849         __ unspill(rscratch1, is64, src_offset);
2850         __ spill(rscratch1, is64, dst_offset);
2851       }
2852       break;
2853     default:
2854       assert(false, "bad rc_class for spill");
2855       ShouldNotReachHere();
2856     }
2857   }
2858 
2859   if (st) {
2860     st->print("spill ");
2861     if (src_lo_rc == rc_stack) {
2862       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
2863     } else {
2864       st->print("%s -> ", Matcher::regName[src_lo]);
2865     }
2866     if (dst_lo_rc == rc_stack) {
2867       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
2868     } else {
2869       st->print("%s", Matcher::regName[dst_lo]);
2870     }
2871     if (bottom_type()->isa_vect() != NULL) {
2872       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
2873     } else {
2874       st->print("\t# spill size = %d", is64 ? 64:32);
2875     }
2876   }
2877 
2878   return 0;
2879 
2880 }
2881 
2882 #ifndef PRODUCT
// Debug printout: before register allocation there are no assigned
// registers, so print the node ids; afterwards let implementation()
// render the actual move.
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(NULL, ra_, false, st);
}
2889 #endif
2890 
// Emit the spill copy; all logic lives in implementation().
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}
2894 
// Size varies with the source/destination classes; measure it.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
2898 
2899 //=============================================================================
2900 
2901 #ifndef PRODUCT
2902 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
2903   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
2904   int reg = ra_->get_reg_first(this);
2905   st->print("add %s, rsp, #%d]\t# box lock",
2906             Matcher::regName[reg], offset);
2907 }
2908 #endif
2909 
// Emit the box-lock address computation: dst = sp + stack offset of the
// monitor slot.  Only offsets encodable as an add/sub immediate are
// supported; larger frames would need a scratch register (unimplemented).
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    ShouldNotReachHere();
  }
}
2922 
// Always a single 4-byte add instruction (see emit above).
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  return 4;
}
2927 
2928 //=============================================================================
2929 
2930 #ifndef PRODUCT
2931 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
2932 {
2933   st->print_cr("# MachUEPNode");
2934   if (UseCompressedClassPointers) {
2935     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
2936     if (Universe::narrow_klass_shift() != 0) {
2937       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
2938     }
2939   } else {
2940    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
2941   }
2942   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
2943   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
2944 }
2945 #endif
2946 
// Emit the unverified entry point: compare the receiver's klass (loaded
// via cmp_klass) against the expected klass and jump to the IC miss stub
// on mismatch.
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
2960 
// Size depends on compressed-klass configuration; measure it.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
2965 
2966 // REQUIRED EMIT CODE
2967 
2968 //=============================================================================
2969 
2970 // Emit exception handler code.
// Emit exception handler code: a far jump to the shared exception blob.
// Returns the offset of the handler within the stub section, or 0 if the
// code cache is full.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2989 
2990 // Emit deopt handler code.
// Emit deopt handler code: set lr to the handler's own address (so the
// deopt blob sees the deopt site as the return address) and far-jump to
// the unpack entry.  Returns the stub offset, or 0 on code-cache full.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // lr <- address of the adr instruction itself
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
3010 
3011 // REQUIRED MATCHER CODE
3012 
3013 //=============================================================================
3014 
3015 const bool Matcher::match_rule_supported(int opcode) {
3016 
3017   // TODO
3018   // identify extra cases that we might want to provide match rules for
3019   // e.g. Op_StrEquals and other intrinsics
3020   if (!has_match_rule(opcode)) {
3021     return false;
3022   }
3023 
3024   return true;  // Per default match rules are supported.
3025 }
3026 
// Only meaningful for x87-style FPU stacks; not used on AArch64.
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
3032 
// Short-branch replacement is not used by this port.
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset)
{
  Unimplemented();
  return false;
}
3038 
// 64-bit constants are cheap to materialize on AArch64.
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}
3044 
3045 // true just means we have fast l2f conversion
// AArch64 has a single-instruction long-to-float conversion (scvtf).
// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
3049 
3050 // Vector width in bytes.
3051 const int Matcher::vector_width_in_bytes(BasicType bt) {
3052   int size = MIN2(16,(int)MaxVectorSize);
3053   // Minimum 2 values in vector
3054   if (size < 2*type2aelembytes(bt)) size = 0;
3055   // But never < 4
3056   if (size < 4) size = 0;
3057   return size;
3058 }
3059 
3060 // Limits on vector size (number of elements) loaded into vector.
// Maximum number of elements of type bt per vector register.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
3064 const int Matcher::min_vector_size(const BasicType bt) {
3065 //  For the moment limit the vector size to 8 bytes
3066     int size = 8 / type2aelembytes(bt);
3067     if (size < 2) size = 2;
3068     return size;
3069 }
3070 
3071 // Vector ideal reg.
3072 const int Matcher::vector_ideal_reg(int len) {
3073   switch(len) {
3074     case  8: return Op_VecD;
3075     case 16: return Op_VecX;
3076   }
3077   ShouldNotReachHere();
3078   return 0;
3079 }
3080 
// Vector shift counts are held in a full 128-bit vector register.
const int Matcher::vector_shift_count_ideal_reg(int size) {
  return Op_VecX;
}
3084 
3085 // AES support not yet implemented
// AES intrinsics do not need the original (unexpanded) key.
// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  return false;
}
3089 
// AArch64 supports misaligned vector store/load.
// Allow misaligned vector memory accesses unless AlignVector forces
// alignment.
const bool Matcher::misaligned_vectors_ok() {
  return !AlignVector; // can be changed by flag
}
3094 
// ClearArray counts are in HeapWords, not bytes.
// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Threshold size for cleararray: below this, inline stores; above,
// call the runtime stub.
const int Matcher::init_array_short_size = 18 * BytesPerLong;
3100 
3101 // Use conditional move (CMOVL)
// Extra cost of a long conditional move over an int one: none (csel
// handles both widths).
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}
3106 
// Extra cost of a float conditional move: none (fcsel is available).
const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}
3111 
// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?  True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// AArch64 variable shifts use only the low bits of the count register.
const bool Matcher::need_masked_shift_count = false;
3123 
3124 // This affects two different things:
3125 //  - how Decode nodes are matched
3126 //  - how ImplicitNullCheck opportunities are recognized
3127 // If true, the matcher will try to remove all Decodes and match them
3128 // (as operands) into nodes. NullChecks are not prepared to deal with
3129 // Decodes by final_graph_reshaping().
3130 // If false, final_graph_reshaping() forces the decode behind the Cmp
3131 // for a NullCheck. The matcher matches the Decode node into a register.
3132 // Implicit_null_check optimization moves the Decode along with the
3133 // memory operation back up before the NullCheck.
// Match DecodeN into addressing only when no shift is needed
// (i.e. the narrow-oop base can be added as a plain offset).
bool Matcher::narrow_oop_use_complex_address() {
  return Universe::narrow_oop_shift() == 0;
}
3137 
// Never fold DecodeNKlass into addressing modes on this port.
bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}
3143 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
// AArch64 ldr/str handle misaligned doubles.
const bool Matcher::misaligned_doubles_ok = true;
3156 
// Not needed on this platform; should never be called (see Unimplemented below).
// Platform hook for implicit-null-check fixup; never needed (and never
// expected to be called) on this port.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
3161 
// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.  AArch64 FP arithmetic is already
// IEEE-754 strict.
const bool Matcher::strict_fp_requires_explicit_rounding = false;
3165 
// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return true; }
3169 
// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
3175 
3176 // Return whether or not this register is ever used as an argument.
3177 // This function is used on startup to build the trampoline stubs in
3178 // generateOptoStub.  Registers not mentioned will be killed by the VM
3179 // call in the trampoline, and arguments in those registers not be
3180 // available to the callee.
3181 bool Matcher::can_be_java_arg(int reg)
3182 {
3183   return
3184     reg ==  R0_num || reg == R0_H_num ||
3185     reg ==  R1_num || reg == R1_H_num ||
3186     reg ==  R2_num || reg == R2_H_num ||
3187     reg ==  R3_num || reg == R3_H_num ||
3188     reg ==  R4_num || reg == R4_H_num ||
3189     reg ==  R5_num || reg == R5_H_num ||
3190     reg ==  R6_num || reg == R6_H_num ||
3191     reg ==  R7_num || reg == R7_H_num ||
3192     reg ==  V0_num || reg == V0_H_num ||
3193     reg ==  V1_num || reg == V1_H_num ||
3194     reg ==  V2_num || reg == V2_H_num ||
3195     reg ==  V3_num || reg == V3_H_num ||
3196     reg ==  V4_num || reg == V4_H_num ||
3197     reg ==  V5_num || reg == V5_H_num ||
3198     reg ==  V6_num || reg == V6_H_num ||
3199     reg ==  V7_num || reg == V7_H_num;
3200 }
3201 
// Any Java argument register may be spilled.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
3206 
// Never use hand-written assembler for long division by constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
3210 
// Register for DIVI projection of divmodI.  There is no combined
// div/mod instruction on AArch64, so this must never be requested.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
3215 
3216 // Register for MODI projection of divmodI.
// Never used: no combined div/mod on AArch64.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
3221 
3222 // Register for DIVL projection of divmodL.
// Never used: no combined div/mod on AArch64.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
3227 
3228 // Register for MODL projection of divmodL.
// Never used: no combined div/mod on AArch64.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
3233 
// SP is saved in the frame pointer across method-handle invokes.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
3237 
3238 // helper for encoding java_to_runtime calls on sim
3239 //
3240 // this is needed to compute the extra arguments required when
3241 // planting a call to the simulator blrt instruction. the TypeFunc
3242 // can be queried to identify the counts for integral, and floating
3243 // arguments and the return type
3244 
// Count the integral and floating argument slots of tf and classify its
// return type, as needed by the simulator blrt call encoding.
static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
{
  int gps = 0;
  int fps = 0;
  const TypeTuple *domain = tf->domain();
  int max = domain->cnt();
  for (int i = TypeFunc::Parms; i < max; i++) {
    const Type *t = domain->field_at(i);
    switch(t->basic_type()) {
    case T_FLOAT:
    case T_DOUBLE:
      fps++;
      // NOTE(review): deliberate-looking fallthrough — float/double args
      // are counted in gps as well (and T_VOID half-slots land in the
      // default).  Confirm this matches the blrt calling convention.
    default:
      gps++;
    }
  }
  gpcnt = gps;
  fpcnt = fps;
  BasicType rt = tf->return_type();
  // Map the Java return type onto the simulator's return-type codes.
  // (The default label sits mid-switch; legal C++, matched deliberately.)
  switch (rt) {
  case T_VOID:
    rtype = MacroAssembler::ret_type_void;
    break;
  default:
    rtype = MacroAssembler::ret_type_integral;
    break;
  case T_FLOAT:
    rtype = MacroAssembler::ret_type_float;
    break;
  case T_DOUBLE:
    rtype = MacroAssembler::ret_type_double;
    break;
  }
}
3279 
// Emit a volatile (acquire/release) memory access INSN on REG.  Volatile
// accesses only support a plain base register: no index, scale, or
// displacement (guaranteed below).  Expands inside an ins_encode body
// where `cbuf` is in scope.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
3288 
// Pointer-to-member types for the three flavors of MacroAssembler
// load/store used by the loadStore helpers below.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
3293 
3294   // Used for all non-volatile memory accesses.  The use of
3295   // $mem->opcode() to discover whether this pattern uses sign-extended
3296   // offsets is something of a kludge.
3297   static void loadStore(MacroAssembler masm, mem_insn insn,
3298                          Register reg, int opcode,
3299                          Register base, int index, int size, int disp)
3300   {
3301     Address::extend scale;
3302 
3303     // Hooboy, this is fugly.  We need a way to communicate to the
3304     // encoder that the index needs to be sign extended, so we have to
3305     // enumerate all the cases.
3306     switch (opcode) {
3307     case INDINDEXSCALEDOFFSETI2L:
3308     case INDINDEXSCALEDI2L:
3309     case INDINDEXSCALEDOFFSETI2LN:
3310     case INDINDEXSCALEDI2LN:
3311     case INDINDEXOFFSETI2L:
3312     case INDINDEXOFFSETI2LN:
3313       scale = Address::sxtw(size);
3314       break;
3315     default:
3316       scale = Address::lsl(size);
3317     }
3318 
3319     if (index == -1) {
3320       (masm.*insn)(reg, Address(base, disp));
3321     } else {
3322       if (disp == 0) {
3323         (masm.*insn)(reg, Address(base, as_Register(index), scale));
3324       } else {
3325         masm.lea(rscratch1, Address(base, disp));
3326         (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
3327       }
3328     }
3329   }
3330 
  // Float-register variant of loadStore above; same address-mode logic.
  // NOTE(review): unlike the integer variant, the sxtw case list omits
  // INDINDEXOFFSETI2L/I2LN — presumably FP patterns never use those
  // modes; confirm against the operand definitions.
  static void loadStore(MacroAssembler masm, mem_float_insn insn,
                         FloatRegister reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDOFFSETI2L:
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDOFFSETI2LN:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

     if (index == -1) {
      (masm.*insn)(reg, Address(base, disp));
    } else {
      if (disp == 0) {
        (masm.*insn)(reg, Address(base, as_Register(index), scale));
      } else {
        // Fold the displacement into rscratch1 first.
        masm.lea(rscratch1, Address(base, disp));
        (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
      }
    }
  }
3359 
  // Vector-register variant: only base+disp or base+scaled-index modes
  // are supported (no combined index+displacement).
  static void loadStore(MacroAssembler masm, mem_vector_insn insn,
                         FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                         int opcode, Register base, int index, int size, int disp)
  {
    if (index == -1) {
      (masm.*insn)(reg, T, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode");
      (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
3371 
3372 %}
3373 
3374 
3375 
3376 //----------ENCODING BLOCK-----------------------------------------------------
3377 // This block specifies the encoding classes used by the compiler to
3378 // output byte streams.  Encoding classes are parameterized macros
3379 // used by Machine Instruction Nodes in order to generate the bit
3380 // encoding of the instruction.  Operands specify their base encoding
3381 // interface with the interface keyword.  There are currently
3382 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
3383 // COND_INTER.  REG_INTER causes an operand to generate a function
3384 // which returns its register number when queried.  CONST_INTER causes
3385 // an operand to generate a function which returns the value of the
3386 // constant when queried.  MEMORY_INTER causes an operand to generate
3387 // four functions which return the Base Register, the Index Register,
3388 // the Scale Value, and the Offset Value of the operand when queried.
3389 // COND_INTER causes an operand to generate six functions which return
3390 // the encoding code (ie - encoding bits for the instruction)
3391 // associated with each basic boolean condition for a conditional
3392 // instruction.
3393 //
3394 // Instructions specify two basic values for encoding.  Again, a
3395 // function is available to check if the constant displacement is an
3396 // oop. They use the ins_encode keyword to specify their encoding
3397 // classes (which must be a sequence of enc_class names, and their
3398 // parameters, specified in the encoding block), and they use the
3399 // opcode keyword to specify, in order, their primary, secondary, and
3400 // tertiary opcode.  Only the opcode sections which a particular
3401 // instruction needs for encoding need to be specified.
3402 encode %{
3403   // Build emit functions for each basic byte or larger field in the
3404   // intel encoding scheme (opcode, rm, sib, immediate), and call them
3405   // from C++ code in the enc_class source block.  Emit functions will
3406   // live in the main source block for now.  In future, we can
3407   // generalize this by adding a syntax that specifies the sizes of
3408   // fields in an order, so that the adlc can build the emit functions
3409   // automagically
3410 
  // Catch-all for instruction patterns whose encoding has not been
  // written yet: emits MacroAssembler::unimplemented("C2 catch all"),
  // which reports a diagnostic if the generated code is ever reached.
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
3416 
  // BEGIN Non-volatile memory access

  // Plain (non-ordered) loads.  Each encoding fetches the destination
  // register from the node's operands and hands off to loadStore(),
  // which selects an addressing mode from the memory operand's
  // base/index/scale/disp components and emits the load named by the
  // MacroAssembler member-function pointer.

  // ldrsbw: load byte, sign-extend to 32 bits
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrsb: load byte, sign-extend to 64 bits
  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrb: load byte, zero-extend (int destination)
  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrb: load byte, zero-extend (long destination operand)
  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrshw: load halfword, sign-extend to 32 bits
  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrsh: load halfword, sign-extend to 64 bits
  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrh: load halfword, zero-extend (int destination)
  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrh: load halfword, zero-extend (long destination operand)
  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrw: load 32-bit word (int destination)
  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrw: load 32-bit word, zero-extend into a long operand
  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrsw: load 32-bit word, sign-extend to 64 bits
  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldr: load 64-bit doubleword
  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrs: load 32-bit float into an FP/SIMD register
  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrd: load 64-bit double into an FP/SIMD register
  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector loads: the extra MacroAssembler::S/D/Q argument selects the
  // 32-, 64- or 128-bit access size.
  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
3520 
  // Plain (non-ordered) stores, mirroring the loads above.  The *0
  // variants store the zero register (zr) directly, avoiding the need
  // to materialize a zero constant.

  // strb: store low byte of src
  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // strb of constant zero (stores zr)
  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // strb of zero preceded by a StoreStore barrier, so the zero byte
  // cannot be reordered ahead of earlier stores.
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // strh: store low halfword of src
  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // strw: store 32-bit word
  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // str: store 64-bit doubleword.  Special case: if the source is the
  // stack pointer, copy sp through rscratch2 first, since str cannot
  // encode sp directly in the source position here.
  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // strs: store 32-bit float from an FP/SIMD register
  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // strd: store 64-bit double from an FP/SIMD register
  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector stores: MacroAssembler::S/D/Q selects the 32-, 64- or
  // 128-bit access size.
  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
3613 
3614   // END Non-volatile memory access
3615 
  // volatile loads and stores
  //
  // These use acquire (ldar*) and release (stlr*) instructions for the
  // ordering required by Java volatile semantics.  MOV_VOLATILE is a
  // macro defined earlier in this file; it forms the effective address
  // from base/index/scale/disp (using the given scratch register as
  // needed) and emits the named acquire/release instruction.

  // store-release byte
  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  // store-release halfword
  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  // store-release 32-bit word
  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}


  // There are no signed-extending acquire loads, so the signed variants
  // emit a zero-extending ldarb/ldarh followed by an explicit
  // sign-extension of the destination register.

  // load-acquire byte, sign-extend to 32 bits
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // load-acquire byte, sign-extend to 64 bits
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  // load-acquire byte, zero-extend (int destination)
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // load-acquire byte, zero-extend (long destination operand)
  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // load-acquire halfword, sign-extend to 32 bits
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // load-acquire halfword, sign-extend to 64 bits
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // load-acquire halfword, zero-extend (int destination)
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // load-acquire halfword, zero-extend (long destination operand)
  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // load-acquire 32-bit word (int destination)
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // load-acquire 32-bit word (long destination operand)
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // load-acquire 64-bit doubleword
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // Acquire loads only target general registers, so volatile float and
  // double loads go via rscratch1 and an fmov into the FP register.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // store-release 64-bit doubleword, with the same sp-source workaround
  // as aarch64_enc_str above.
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
        MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // Release stores from FP registers go via rscratch2.  The fmov is
  // done in a nested block so its local _masm does not clash with the
  // assembler that MOV_VOLATILE presumably declares — TODO confirm
  // against the macro's definition earlier in this file.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
3742 
  // synchronized read/update encodings

  // Load-acquire-exclusive of a 64-bit value.  ldaxr only takes a bare
  // base register, so any index/displacement is first folded into
  // rscratch1 with lea.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {   // no index register
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // base + disp + (index << scale), built in two lea steps
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
3773 
  // Store-release-exclusive of a 64-bit value, paired with ldaxr above.
  // The address is folded into rscratch2 when needed; stlxr writes its
  // status (0 = success) into rscratch1, and the final cmpw sets the
  // condition flags from that status so callers can branch on EQ/NE.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {   // no index register
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // base + disp + (index << scale), built in two lea steps
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    __ cmpw(rscratch1, zr);  // flags: EQ iff the exclusive store succeeded
  %}
3803 
  // Compare-and-swap encodings.  All four delegate to
  // MacroAssembler::cmpxchg, parameterized by function pointers for the
  // exclusive load, the compare and the release store; only a bare base
  // register address is supported (guaranteed below).

  // 64-bit CAS with a plain (non-acquire) exclusive load
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               &Assembler::ldxr, &MacroAssembler::cmp, &Assembler::stlxr);
  %}

  // 32-bit CAS with a plain (non-acquire) exclusive load
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               &Assembler::ldxrw, &MacroAssembler::cmpw, &Assembler::stlxrw);
  %}


  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               &Assembler::ldaxr, &MacroAssembler::cmp, &Assembler::stlxr);
  %}

  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               &Assembler::ldaxrw, &MacroAssembler::cmpw, &Assembler::stlxrw);
  %}


  // auxiliary used for CompareAndSwapX to set result register:
  // res = (flags == EQ) ? 1 : 0, i.e. whether the CAS succeeded.
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}
3844 
  // prefetch encodings

  // Prefetch for store (PSTL1KEEP hint), with the same addressing-mode
  // dispatch as the loads/stores above.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
      // NOTE(review): purpose of this nop is not evident from the code
      // here — possibly to keep this form the same size as the indexed
      // form below; confirm before removing.
      __ nop();
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
3866 
  // Zero a word-aligned region of cnt words starting at base, using an
  // 8-way-unrolled store loop entered Duff's-device style: the residual
  // cnt % 8 words are handled by branching into the middle of the
  // unrolled body.  Clobbers rscratch1, rscratch2, cnt_reg and base_reg.
  enc_class aarch64_enc_clear_array_reg_reg(iRegL_R11 cnt, iRegP_R10 base) %{
    MacroAssembler _masm(&cbuf);
    Register cnt_reg = as_Register($cnt$$reg);
    Register base_reg = as_Register($base$$reg);
    // base is word aligned
    // cnt is count of words

    Label loop;
    Label entry;

//  Algorithm:
//
//    scratch1 = cnt & 7;
//    cnt -= scratch1;
//    p += scratch1;
//    switch (scratch1) {
//      do {
//        cnt -= 8;
//          p[-8] = 0;
//        case 7:
//          p[-7] = 0;
//        case 6:
//          p[-6] = 0;
//          // ...
//        case 1:
//          p[-1] = 0;
//        case 0:
//          p += 8;
//      } while (cnt);
//    }

    const int unroll = 8; // Number of str(zr) instructions we'll unroll

    __ andr(rscratch1, cnt_reg, unroll - 1);  // tmp1 = cnt % unroll
    __ sub(cnt_reg, cnt_reg, rscratch1);      // cnt -= unroll
    // base_reg always points to the end of the region we're about to zero
    __ add(base_reg, base_reg, rscratch1, Assembler::LSL, exact_log2(wordSize));
    // Compute the jump target: entry minus one 4-byte str instruction
    // per residual word, so exactly rscratch1 stores execute first.
    __ adr(rscratch2, entry);
    __ sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 2);
    __ br(rscratch2);
    __ bind(loop);
    __ sub(cnt_reg, cnt_reg, unroll);
    // Unrolled body: stores at negative offsets from the (pre-advanced)
    // base pointer.
    for (int i = -unroll; i < 0; i++)
      __ str(zr, Address(base_reg, i * wordSize));
    __ bind(entry);
    __ add(base_reg, base_reg, unroll * wordSize);
    __ cbnz(cnt_reg, loop);
  %}
3915 
  // mov (immediate/constant) encodings
3917 
  // Move a 32-bit immediate into an int register; zero goes through zr
  // to get the canonical form.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    MacroAssembler _masm(&cbuf);
    u_int32_t con = (u_int32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // Move a 64-bit immediate into a long register; zero goes through zr.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    u_int64_t con = (u_int64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}

  // Move a pointer constant, dispatching on its relocation type:
  // oops and metadata need relocatable moves; plain addresses below the
  // page size are moved directly, larger ones via adrp+add.  NULL and
  // the value 1 are handled by the dedicated p0/p1 encodings below.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}

  // Move the NULL pointer constant (zero)
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Move the pointer constant 1
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (u_int64_t)1);
  %}
3976 
  // Materialize the polling page address with a poll-type relocation;
  // the page is assumed page-aligned so adrp must yield a zero offset.
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}

  // Materialize the card table (byte map) base address, likewise
  // assumed page-aligned.
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, ExternalAddress(page), off);
    assert(off == 0, "assumed offset == 0");
  %}

  // Move a narrow (compressed) oop constant; must carry an oop reloc.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // Move the null narrow oop (zero)
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Move a narrow (compressed) klass constant; must carry a metadata reloc.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
4026 
  // arithmetic encodings

  // Shared add/sub-immediate encoding (32-bit): the node's $primary
  // opcode selects subtract (1) vs add (0) by negating the constant,
  // then the sign of the resulting constant picks subw vs addw so the
  // immediate is always encoded as a non-negative value.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // 64-bit version of the above
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}

  // Division and remainder use the corrected_idiv helpers, which
  // implement Java semantics (e.g. MIN_VALUE / -1); the boolean
  // argument selects remainder (true) vs quotient (false).

  // 32-bit signed divide
  enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 64-bit signed divide
  enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 32-bit signed remainder
  enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}

  // 64-bit signed remainder
  enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}
4088 
  // compare instruction encodings

  // 32-bit register-register compare
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare against an add/sub-encodable immediate: negative
  // values are compared by adding their negation (addsw) instead,
  // keeping the encoded immediate non-negative.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare against an arbitrary immediate: materialize it in
  // rscratch1 first.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int32_t val = (u_int32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  // 64-bit register-register compare
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare against a 12-bit immediate.  val == -val detects
  // Long.MIN_VALUE, which cannot be negated and must be materialized.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare against an arbitrary immediate via rscratch1
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int64_t val = (u_int64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}

  // pointer compare (64-bit)
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // narrow (compressed) oop compare (32-bit)
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // pointer null test
  enc_class aarch64_enc_testp(iRegP src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // narrow oop null test
  enc_class aarch64_enc_testn(iRegN src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
4172 
  // unconditional branch to a label
  enc_class aarch64_enc_b(label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // conditional branch; the condition code comes from the cmpOp
  // operand's encoding.
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // conditional branch, unsigned-comparison flavor of the operand
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
4190 
  // Slow-path subtype check: scans sub's secondary supers for super via
  // check_klass_subtype_slow_path.  On a miss, control reaches the miss
  // label with result_reg untouched by the zeroing below; when $primary
  // is set, a hit additionally clears result_reg to signal success.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
4208 
  // Direct Java call.  Uses a trampoline call so the target can be
  // anywhere in the code cache; the relocation type is chosen by the
  // kind of call site (runtime wrapper / optimized virtual / static).
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else if (_optimized_virtual) {
      call = __ trampoline_call(Address(addr, relocInfo::opt_virtual_call_type), &cbuf);
    } else {
      call = __ trampoline_call(Address(addr, relocInfo::static_call_type), &cbuf);
    }
    // A NULL return means there was no room for the trampoline stub:
    // fail the compile cleanly rather than emit a broken call.
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }

    if (_method) {
      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
  %}
4236 
  // Dispatched (inline-cache) Java call.  A NULL result from ic_call
  // means the code cache is exhausted; abort this compile.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    MacroAssembler _masm(&cbuf);
    address call = __ ic_call((address)$meth$$method);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
4245 
  // Post-call verification hook.  Stack-depth verification is not
  // implemented for aarch64 yet: with -XX:+VerifyStackAtCalls this
  // emits a call_Unimplemented marker instead of a real check.
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find magic cookie on stack
      __ call_Unimplemented();
    }
  %}
4253 
  // Call from compiled Java into the runtime (e.g. arraycopy stubs).
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target lives in the code cache: a (trampolined) direct call works.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Target is outside the code cache: use blrt with the argument
      // counts / return type extracted from this call's TypeFunc.
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaThread::pd_last_frame().
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      // Pop the breadcrumb pair pushed above.
      __ add(sp, sp, 2 * wordSize);
    }
  %}
4284 
  // Jump (not call) to the shared rethrow stub.
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
4289 
  // Method return: branch to the address in the link register.
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}
4294 
4295   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
4296     MacroAssembler _masm(&cbuf);
4297     Register target_reg = as_Register($jump_target$$reg);
4298     __ br(target_reg);
4299   %}
4300 
  // Tail jump used for exception forwarding: hand the return address
  // to the callee in r3, then jump to the target.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
4310 
  // Fast-path monitor enter.  On exit the condition flags encode the
  // outcome: EQ = lock acquired inline, NE = caller must take the
  // runtime slow path (see the comments at label 'cont' below).
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      // oop is never null here, so this leaves NE and the slow path runs.
      __ cmp(oop, zr);
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Handle existing monitor
    if ((EmitSync & 0x02) == 0) {
      // we can use AArch64's bit test and branch here but
      // markoopDesc does not define a bit index just the bit value
      // so assert in case the bit pos changes
      // NOTE(review): a leading double underscore is reserved to the
      // implementation in C/C++; the macro works but is nonstandard.
#     define __monitor_value_log2 1
      assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
      __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#     undef __monitor_value_log2
    }

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // Load Compare Value application register.

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with mark and if equal exchange scratch1
    // with object markOop.
    {
      // Hand-rolled LL/SC CAS loop: retry until the store-exclusive
      // succeeds (status in tmp == 0) or the compare fails.
      Label retry_load;
      __ bind(retry_load);
      __ ldaxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, box, oop);
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, will have now locked it will continue at label cont

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      // The object's monitor m is unlocked iff m->owner == NULL,
      // otherwise m->owner may contain a thread or a stack address.
      //
      // Try to CAS m->owner from NULL to current thread.
      __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
      __ mov(disp_hdr, zr);

      {
        // LL/SC CAS on the owner field; the cmp leaves EQ on success
        // and NE on failure, matching the contract at 'cont'.
        Label retry_load, fail;
        __ bind(retry_load);
        __ ldaxr(rscratch1, tmp);
        __ cmp(disp_hdr, rscratch1);
        __ br(Assembler::NE, fail);
        // use stlxr to ensure update is immediately visible
        __ stlxr(rscratch1, rthread, tmp);
        __ cbnzw(rscratch1, retry_load);
        __ bind(fail);
      }

      // Label next;
      // __ cmpxchgptr(/*oldv=*/disp_hdr,
      //               /*newv=*/rthread,
      //               /*addr=*/tmp,
      //               /*tmp=*/rscratch1,
      //               /*succeed*/next,
      //               /*fail*/NULL);
      // __ bind(next);

      // store a non-null value into the box.
      __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

      // PPC port checks the following invariants
      // #ifdef ASSERT
      // bne(flag, cont);
      // We have acquired the monitor, check some invariants.
      // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
      // Invariant 1: _recursions should be 0.
      // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
      // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
      //                        "monitor->_recursions should be 0", -1);
      // Invariant 2: OwnerIsThread shouldn't be 0.
      // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
      //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
      //                           "monitor->OwnerIsThread shouldn't be 0", -1);
      // #endif
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
4452 
  // TODO
  // reimplement this with custom cmpxchgptr code
  // which avoids some of the unnecessary branching
  //
  // Fast-path monitor exit.  Flags on exit: EQ = unlocked inline,
  // NE = caller must take the runtime slow path.
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      __ cmp(oop, zr); // Oop can't be 0 here => always false.
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
    }

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      {
        // LL/SC loop: restore the displaced header into the object's
        // mark word iff the mark still points at our BasicLock.
        Label retry_load;
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        __ cbzw(tmp, cont);
        __ b(retry_load);
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
      __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
      __ cmp(rscratch1, zr);
      __ br(Assembler::NE, cont);

      // We own the monitor with no recursions.  Release it only if
      // both EntryList and cxq are empty; otherwise hand off to the
      // runtime (NE at cont).
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
      // The cmp sets the flags consumed at 'cont'; the cbnz takes the branch.
      __ cmp(rscratch1, zr);
      __ cbnz(rscratch1, cont);
      // need a release store here
      __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ stlr(rscratch1, tmp); // rscratch1 is zero
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
4545 
4546 %}
4547 
4548 //----------FRAME--------------------------------------------------------------
4549 // Definition of frame structure and management information.
4550 //
4551 //  S T A C K   L A Y O U T    Allocators stack-slot number
4552 //                             |   (to get allocators register number
4553 //  G  Owned by    |        |  v    add OptoReg::stack0())
4554 //  r   CALLER     |        |
4555 //  o     |        +--------+      pad to even-align allocators stack-slot
4556 //  w     V        |  pad0  |        numbers; owned by CALLER
4557 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
4558 //  h     ^        |   in   |  5
4559 //        |        |  args  |  4   Holes in incoming args owned by SELF
4560 //  |     |        |        |  3
4561 //  |     |        +--------+
4562 //  V     |        | old out|      Empty on Intel, window on Sparc
4563 //        |    old |preserve|      Must be even aligned.
4564 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
4565 //        |        |   in   |  3   area for Intel ret address
4566 //     Owned by    |preserve|      Empty on Sparc.
4567 //       SELF      +--------+
4568 //        |        |  pad2  |  2   pad to align old SP
4569 //        |        +--------+  1
4570 //        |        | locks  |  0
4571 //        |        +--------+----> OptoReg::stack0(), even aligned
4572 //        |        |  pad1  | 11   pad to align new SP
4573 //        |        +--------+
4574 //        |        |        | 10
4575 //        |        | spills |  9   spills
4576 //        V        |        |  8   (pad0 slot for callee)
4577 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
4578 //        ^        |  out   |  7
4579 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
4580 //     Owned by    +--------+
4581 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
4582 //        |    new |preserve|      Must be even-aligned.
4583 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
4584 //        |        |        |
4585 //
4586 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
4587 //         known from SELF's arguments and the Java calling convention.
4588 //         Region 6-7 is determined per call site.
// Note 2: If the calling convention leaves holes in the incoming argument
//         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
//         incoming area, as the Java calling convention is completely under
//         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
//         varargs C calling conventions.
4596 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
4597 //         even aligned with pad0 as needed.
4598 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
4599 //           (the latter is true on Intel but is it false on AArch64?)
4600 //         region 6-11 is even aligned; it may be padded out more so that
4601 //         the region from SP to FP meets the minimum stack alignment.
4602 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
4603 //         alignment.  Region 11, pad1, may be dynamically extended so that
4604 //         SP meets the minimum alignment.
4605 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  // NOTE(review): R31 here uses this file's register-definition naming;
  // confirm against the register block that it denotes sp as intended.
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              round_to((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // lo/hi are indexed by ideal register number and name the low and
    // high halves of the return-value register pair for that type.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                       // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
4709 
4710 //----------ATTRIBUTES---------------------------------------------------------
4711 //----------Operand Attributes-------------------------------------------------
// Attributes required by the ADLC on every operand / instruction.
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
4727 
4728 //----------OPERANDS-----------------------------------------------------------
4729 // Operand definitions must precede instruction definitions for correct parsing
4730 // in the ADLC because operands constitute user defined types which are used in
4731 // instruction definitions.
4732 
4733 //----------Simple Operands----------------------------------------------------
4734 
// Integer operands 32 bit
// 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer no greater than 4
// (note: the predicate has no lower bound)
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 255 (0xFF)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 65535 (0xFFFF)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4888 
// NOTE(review): despite the immL_ prefix, the next two operands match
// ConI and test get_int() -- they appear to be used where a long
// instruction takes a 32-bit constant node (e.g. shift counts).
// Confirm against the instruct definitions that use them before
// "fixing" to ConL/get_long().
operand immL_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immL_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4908 
// 64 bit integer equal to 65535 (0xFFFF)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer equal to 4294967295 (0xFFFFFFFF)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long constant of the form 2^k - 1 (a contiguous low-order bit mask)
// with the top two bits clear.
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Int constant of the form 2^k - 1 (a contiguous low-order bit mask)
// with the top two bits clear.
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset, long-constant variant of immIU12
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long-constant variant of immIOffset
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5047 
// Integer operands 64 bit
// 64 bit immediate
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit offset of pc in thread anchor
// (the value fits in 32 bits but the node is a long constant)
operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5134 
// Pointer operands
// Pointer Immediate (any value)
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
// matches only the address of the VM's safepoint polling page
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
// matches only the byte_map_base of the current card-table barrier set
operand immByteMapBase()
%{
  // Get base of card map
  predicate((jbyte*)n->get_ptr() ==
        ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5216 
// Float and Double operands
// Double Immediate (any value)
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: encodable as a packed floating-point immediate
// (validity delegated to Assembler::operand_valid_for_float_immediate)
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate (any value)
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: encodable as a packed floating-point immediate
// (validity delegated to Assembler::operand_valid_for_float_immediate)
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5277 
// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Immediate (compressed class pointer constant)
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5308 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  format %{ %}
  interface(REG_INTER);
%}
5351 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// The fixed-register operands below pin allocation to a single register;
// they are used where calling conventions or runtime stubs require it.

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5490 
// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5535 
5536 
// Narrow Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5569 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register operand allocated from the vectord_reg class
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register operand allocated from the vectorx_reg class
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Fixed FP/SIMD registers V0..V3 (e.g. for runtime call conventions)

operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5649 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// for floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

// condition flags produced by signed (and FP, see above) compares
operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
5689 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5731 
//----------Memory Operands----------------------------------------------------
// In the MEMORY_INTER descriptors below, index(0xffffffff) denotes the
// absence of an index register; scale(0x0) and disp(0x0) denote no
// scaling and no displacement respectively.

// [reg] -- plain register-indirect addressing
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg + (lreg << scale) + off] with an unsigned 12-bit int offset
operand indIndexScaledOffsetI(iRegP reg, iRegL lreg, immIScale scale, immIU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// [reg + (lreg << scale) + off] with an unsigned 12-bit long offset
operand indIndexScaledOffsetL(iRegP reg, iRegL lreg, immIScale scale, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// [reg + sign-extended ireg + off]
operand indIndexOffsetI2L(iRegP reg, iRegI ireg, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (ConvI2L ireg)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg, $off I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp($off);
  %}
%}

// [reg + (sign-extended ireg << scale) + off]
operand indIndexScaledOffsetI2L(iRegP reg, iRegI ireg, immIScale scale, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg sxtw($scale), $off I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}

// [reg + (sign-extended ireg << scale)]
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// [reg + (lreg << scale)]
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [reg + lreg]
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg + int offset]
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [reg + long offset]
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5873 
5874 
// Narrow-oop variants of the memory operands above. The base is a
// compressed pointer (DecodeN reg); all of these are only legal when
// decoding needs no shift (Universe::narrow_oop_shift() == 0).
operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexScaledOffsetIN(iRegN reg, iRegL lreg, immIScale scale, immIU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

operand indIndexScaledOffsetLN(iRegN reg, iRegL lreg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

operand indIndexOffsetI2LN(iRegN reg, iRegI ireg, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (ConvI2L ireg)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg, $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp($off);
  %}
%}

operand indIndexScaledOffsetI2LN(iRegN reg, iRegI ireg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg sxtw($scale), $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}

operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6024 
6025 
6026 
// AArch64 opto stubs need to write to the pc slot in the thread anchor
// (base = thread register, off = immL_pc_off defined above)
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6041 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
// NOTE(review): the "RSP" comments below look inherited from the x86 port;
// presumably 0x1e is this port's encoding of the stack pointer -- verify
// against the register definitions at the top of the file.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
6116 
// Operands for expressing Control Flow
// NOTE: Label is a predefined operand which should not be redefined in
//       the AD file. It is generically handled within the ADLC.

//----------Conditional Branch Operands----------------------------------------
// Comparison Op  - This is the operation of the comparison, and is limited to
//                  the following set of codes:
//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.

// used for signed integral comparisons and fp comparisons
// (hex values are the AArch64 condition-code encodings; strings are the
// corresponding condition mnemonics)

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons
// (lo/hs/ls/hi are the unsigned condition mnemonics)

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6172 
// Special operand allowing long args to int ops to be truncated for free

operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}

// operand class grouping the unscaled addressing modes
// (no shifted/extended index register)
opclass vmem(indirect, indIndex, indOffI, indOffL);
6187 
//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used as to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.

// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address
// (covers both the plain and the narrow-oop addressing modes)

opclass memory(indirect, indIndexScaledOffsetI, indIndexScaledOffsetL, indIndexOffsetI2L, indIndexScaledOffsetI2L, indIndexScaled, indIndexScaledI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledOffsetIN, indIndexScaledOffsetLN, indIndexOffsetI2LN, indIndexScaledOffsetI2LN, indIndexScaledN, indIndexScaledI2LN, indIndexN, indOffIN, indOffLN);


// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but its not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
6216 
6217 //----------PIPELINE-----------------------------------------------------------
6218 // Rules which define the behavior of the target architectures pipeline.
6219 // Integer ALU reg operation
6220 pipeline %{
6221 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
6234 
6235 // We don't use an actual pipeline model so don't care about resources
6236 // or description. we do use pipeline classes to introduce fixed
6237 // latencies
6238 
//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

// INS0/INS1 model the two issue slots (INS01 = either); ALU0/ALU1 the
// two integer ALUs (ALU = either); plus multiply-accumulate, divide,
// branch, load/store and NEON/FP units.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Stages: issue, execute 1, execute 2, write-back
pipe_desc(ISS, EX1, EX2, WR);
6254 
6255 //----------PIPELINE CLASSES---------------------------------------------------
6256 // Pipeline Classes describe the stages in which input and output are
6257 // referenced by the hardware pipeline.
6258 
6259 //------- Integer ALU operations --------------------------
6260 
6261 // Integer ALU reg-reg operation
6262 // Operands needed in EX1, result generated in EX2
6263 // Eg.  ADD     x0, x1, x2
6264 pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6265 %{
6266   single_instruction;
6267   dst    : EX2(write);
6268   src1   : EX1(read);
6269   src2   : EX1(read);
6270   INS01  : ISS; // Dual issue as instruction 0 or 1
6271   ALU    : EX2;
6272 %}
6273 
6274 // Integer ALU reg-reg operation with constant shift
6275 // Shifted register must be available in LATE_ISS instead of EX1
6276 // Eg.  ADD     x0, x1, x2, LSL #2
6277 pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
6278 %{
6279   single_instruction;
6280   dst    : EX2(write);
6281   src1   : EX1(read);
6282   src2   : ISS(read);
6283   INS01  : ISS;
6284   ALU    : EX2;
6285 %}
6286 
6287 // Integer ALU reg operation with constant shift
6288 // Eg.  LSL     x0, x1, #shift
6289 pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
6290 %{
6291   single_instruction;
6292   dst    : EX2(write);
6293   src1   : ISS(read);
6294   INS01  : ISS;
6295   ALU    : EX2;
6296 %}
6297 
6298 // Integer ALU reg-reg operation with variable shift
6299 // Both operands must be available in LATE_ISS instead of EX1
6300 // Result is available in EX1 instead of EX2
6301 // Eg.  LSLV    x0, x1, x2
6302 pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
6303 %{
6304   single_instruction;
6305   dst    : EX1(write);
6306   src1   : ISS(read);
6307   src2   : ISS(read);
6308   INS01  : ISS;
6309   ALU    : EX1;
6310 %}
6311 
6312 // Integer ALU reg-reg operation with extract
6313 // As for _vshift above, but result generated in EX2
6314 // Eg.  EXTR    x0, x1, x2, #N
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read); // both sources needed at late issue, as for _vshift
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1; // NOTE(review): header says result generated in EX2, but the
                // ALU resource is booked in EX1 (same as _vshift) while dst is
                // written in EX2 — confirm this staggering is intentional
%}
6324 
6325 // Integer ALU reg operation
6326 // Eg.  NEG     x0, x1
6327 pipe_class ialu_reg(iRegI dst, iRegI src)
6328 %{
6329   single_instruction;
6330   dst    : EX2(write);
6331   src    : EX1(read);
6332   INS01  : ISS;
6333   ALU    : EX2;
6334 %}
6335 
// Integer ALU reg immediate operation
6337 // Eg.  ADD     x0, x1, #N
6338 pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
6339 %{
6340   single_instruction;
6341   dst    : EX2(write);
6342   src1   : EX1(read);
6343   INS01  : ISS;
6344   ALU    : EX2;
6345 %}
6346 
6347 // Integer ALU immediate operation (no source operands)
6348 // Eg.  MOV     x0, #N
6349 pipe_class ialu_imm(iRegI dst)
6350 %{
6351   single_instruction;
6352   dst    : EX1(write);
6353   INS01  : ISS;
6354   ALU    : EX1;
6355 %}
6356 
6357 //------- Compare operation -------------------------------
6358 
6359 // Compare reg-reg
6360 // Eg.  CMP     x0, x1
6361 pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
6362 %{
6363   single_instruction;
6364 //  fixed_latency(16);
6365   cr     : EX2(write);
6366   op1    : EX1(read);
6367   op2    : EX1(read);
6368   INS01  : ISS;
6369   ALU    : EX2;
6370 %}
6371 
// Compare reg-imm
// Eg.  CMP     x0, #N
6374 pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
6375 %{
6376   single_instruction;
6377 //  fixed_latency(16);
6378   cr     : EX2(write);
6379   op1    : EX1(read);
6380   INS01  : ISS;
6381   ALU    : EX2;
6382 %}
6383 
6384 //------- Conditional instructions ------------------------
6385 
6386 // Conditional no operands
6387 // Eg.  CSINC   x0, zr, zr, <cond>
6388 pipe_class icond_none(iRegI dst, rFlagsReg cr)
6389 %{
6390   single_instruction;
6391   cr     : EX1(read);
6392   dst    : EX2(write);
6393   INS01  : ISS;
6394   ALU    : EX2;
6395 %}
6396 
6397 // Conditional 2 operand
6398 // EG.  CSEL    X0, X1, X2, <cond>
6399 pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
6400 %{
6401   single_instruction;
6402   cr     : EX1(read);
6403   src1   : EX1(read);
6404   src2   : EX1(read);
6405   dst    : EX2(write);
6406   INS01  : ISS;
6407   ALU    : EX2;
6408 %}
6409 
// Conditional 1 operand
// EG.  CSINC   x0, x1, zr, <cond>
6412 pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
6413 %{
6414   single_instruction;
6415   cr     : EX1(read);
6416   src    : EX1(read);
6417   dst    : EX2(write);
6418   INS01  : ISS;
6419   ALU    : EX2;
6420 %}
6421 
6422 //------- Multiply pipeline operations --------------------
6423 
6424 // Multiply reg-reg
6425 // Eg.  MUL     w0, w1, w2
6426 pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6427 %{
6428   single_instruction;
6429   dst    : WR(write);
6430   src1   : ISS(read);
6431   src2   : ISS(read);
6432   INS01  : ISS;
6433   MAC    : WR;
6434 %}
6435 
6436 // Multiply accumulate
6437 // Eg.  MADD    w0, w1, w2, w3
6438 pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
6439 %{
6440   single_instruction;
6441   dst    : WR(write);
6442   src1   : ISS(read);
6443   src2   : ISS(read);
6444   src3   : ISS(read);
6445   INS01  : ISS;
6446   MAC    : WR;
6447 %}
6448 
6449 // Eg.  MUL     w0, w1, w2
6450 pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6451 %{
6452   single_instruction;
6453   fixed_latency(3); // Maximum latency for 64 bit mul
6454   dst    : WR(write);
6455   src1   : ISS(read);
6456   src2   : ISS(read);
6457   INS01  : ISS;
6458   MAC    : WR;
6459 %}
6460 
6461 // Multiply accumulate
6462 // Eg.  MADD    w0, w1, w2, w3
6463 pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
6464 %{
6465   single_instruction;
6466   fixed_latency(3); // Maximum latency for 64 bit mul
6467   dst    : WR(write);
6468   src1   : ISS(read);
6469   src2   : ISS(read);
6470   src3   : ISS(read);
6471   INS01  : ISS;
6472   MAC    : WR;
6473 %}
6474 
6475 //------- Divide pipeline operations --------------------
6476 
6477 // Eg.  SDIV    w0, w1, w2
6478 pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6479 %{
6480   single_instruction;
6481   fixed_latency(8); // Maximum latency for 32 bit divide
6482   dst    : WR(write);
6483   src1   : ISS(read);
6484   src2   : ISS(read);
6485   INS0   : ISS; // Can only dual issue as instruction 0
6486   DIV    : WR;
6487 %}
6488 
6489 // Eg.  SDIV    x0, x1, x2
6490 pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6491 %{
6492   single_instruction;
6493   fixed_latency(16); // Maximum latency for 64 bit divide
6494   dst    : WR(write);
6495   src1   : ISS(read);
6496   src2   : ISS(read);
6497   INS0   : ISS; // Can only dual issue as instruction 0
6498   DIV    : WR;
6499 %}
6500 
6501 //------- Load pipeline operations ------------------------
6502 
6503 // Load - prefetch
6504 // Eg.  PFRM    <mem>
6505 pipe_class iload_prefetch(memory mem)
6506 %{
6507   single_instruction;
6508   mem    : ISS(read);
6509   INS01  : ISS;
6510   LDST   : WR;
6511 %}
6512 
6513 // Load - reg, mem
6514 // Eg.  LDR     x0, <mem>
6515 pipe_class iload_reg_mem(iRegI dst, memory mem)
6516 %{
6517   single_instruction;
6518   dst    : WR(write);
6519   mem    : ISS(read);
6520   INS01  : ISS;
6521   LDST   : WR;
6522 %}
6523 
6524 // Load - reg, reg
6525 // Eg.  LDR     x0, [sp, x1]
6526 pipe_class iload_reg_reg(iRegI dst, iRegI src)
6527 %{
6528   single_instruction;
6529   dst    : WR(write);
6530   src    : ISS(read);
6531   INS01  : ISS;
6532   LDST   : WR;
6533 %}
6534 
6535 //------- Store pipeline operations -----------------------
6536 
6537 // Store - zr, mem
6538 // Eg.  STR     zr, <mem>
6539 pipe_class istore_mem(memory mem)
6540 %{
6541   single_instruction;
6542   mem    : ISS(read);
6543   INS01  : ISS;
6544   LDST   : WR;
6545 %}
6546 
6547 // Store - reg, mem
6548 // Eg.  STR     x0, <mem>
6549 pipe_class istore_reg_mem(iRegI src, memory mem)
6550 %{
6551   single_instruction;
6552   mem    : ISS(read);
6553   src    : EX2(read);
6554   INS01  : ISS;
6555   LDST   : WR;
6556 %}
6557 
6558 // Store - reg, reg
6559 // Eg. STR      x0, [sp, x1]
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read); // NOTE(review): 'dst' here is the address/index register
                      // of the store; it is read at issue, never written
  src    : EX2(read); // data to store, needed by EX2
  INS01  : ISS;
  LDST   : WR;
%}
6568 
//------- Branch pipeline operations ----------------------
6570 
6571 // Branch
6572 pipe_class pipe_branch()
6573 %{
6574   single_instruction;
6575   INS01  : ISS;
6576   BRANCH : EX1;
6577 %}
6578 
6579 // Conditional branch
6580 pipe_class pipe_branch_cond(rFlagsReg cr)
6581 %{
6582   single_instruction;
6583   cr     : EX1(read);
6584   INS01  : ISS;
6585   BRANCH : EX1;
6586 %}
6587 
6588 // Compare & Branch
6589 // EG.  CBZ/CBNZ
6590 pipe_class pipe_cmp_branch(iRegI op1)
6591 %{
6592   single_instruction;
6593   op1    : EX1(read);
6594   INS01  : ISS;
6595   BRANCH : EX1;
6596 %}
6597 
6598 //------- Synchronisation operations ----------------------
6599 
6600 // Any operation requiring serialization.
6601 // EG.  DMB/Atomic Ops/Load Acquire/Str Release
6602 pipe_class pipe_serial()
6603 %{
6604   single_instruction;
6605   force_serialization;
6606   fixed_latency(16);
6607   INS01  : ISS(2); // Cannot dual issue with any other instruction
6608   LDST   : WR;
6609 %}
6610 
6611 // Generic big/slow expanded idiom - also serialized
6612 pipe_class pipe_slow()
6613 %{
6614   instruction_count(10);
6615   multiple_bundles;
6616   force_serialization;
6617   fixed_latency(16);
6618   INS01  : ISS(2); // Cannot dual issue with any other instruction
6619   LDST   : WR;
6620 %}
6621 
6622 // Empty pipeline class
6623 pipe_class pipe_class_empty()
6624 %{
6625   single_instruction;
6626   fixed_latency(0);
6627 %}
6628 
6629 // Default pipeline class.
6630 pipe_class pipe_class_default()
6631 %{
6632   single_instruction;
6633   fixed_latency(2);
6634 %}
6635 
6636 // Pipeline class for compares.
6637 pipe_class pipe_class_compare()
6638 %{
6639   single_instruction;
6640   fixed_latency(16);
6641 %}
6642 
6643 // Pipeline class for memory operations.
6644 pipe_class pipe_class_memory()
6645 %{
6646   single_instruction;
6647   fixed_latency(16);
6648 %}
6649 
6650 // Pipeline class for call.
6651 pipe_class pipe_class_call()
6652 %{
6653   single_instruction;
6654   fixed_latency(100);
6655 %}
6656 
6657 // Define the class for the Nop node.
6658 define %{
6659    MachNop = pipe_class_empty;
6660 %}
6661 
6662 %}
6663 //----------INSTRUCTIONS-------------------------------------------------------
6664 //
6665 // match      -- States which machine-independent subtree may be replaced
6666 //               by this instruction.
6667 // ins_cost   -- The estimated cost of this instruction is used by instruction
6668 //               selection to identify a minimum cost tree of machine
6669 //               instructions that matches a tree of machine-independent
6670 //               instructions.
6671 // format     -- A string providing the disassembly for this instruction.
6672 //               The value of an instruction's operand may be inserted
6673 //               by referring to it with a '$' prefix.
6674 // opcode     -- Three instruction opcodes may be provided.  These are referred
6675 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
6677 //               indicate the type of machine instruction, while secondary
6678 //               and tertiary are often used for prefix options or addressing
6679 //               modes.
6680 // ins_encode -- A list of encode classes with parameters. The encode class
6681 //               name must have been defined in an 'enc_class' specification
6682 //               in the encode section of the architecture description.
6683 
6684 // ============================================================================
6685 // Memory (Load/Store) Instructions
6686 
6687 // Load Instructions
6688 
6689 // Load Byte (8 bit signed)
6690 instruct loadB(iRegINoSp dst, memory mem)
6691 %{
6692   match(Set dst (LoadB mem));
6693   predicate(!needs_acquiring_load(n));
6694 
6695   ins_cost(4 * INSN_COST);
6696   format %{ "ldrsbw  $dst, $mem\t# byte" %}
6697 
6698   ins_encode(aarch64_enc_ldrsbw(dst, mem));
6699 
6700   ins_pipe(iload_reg_mem);
6701 %}
6702 
6703 // Load Byte (8 bit signed) into long
6704 instruct loadB2L(iRegLNoSp dst, memory mem)
6705 %{
6706   match(Set dst (ConvI2L (LoadB mem)));
6707   predicate(!needs_acquiring_load(n->in(1)));
6708 
6709   ins_cost(4 * INSN_COST);
6710   format %{ "ldrsb  $dst, $mem\t# byte" %}
6711 
6712   ins_encode(aarch64_enc_ldrsb(dst, mem));
6713 
6714   ins_pipe(iload_reg_mem);
6715 %}
6716 
6717 // Load Byte (8 bit unsigned)
6718 instruct loadUB(iRegINoSp dst, memory mem)
6719 %{
6720   match(Set dst (LoadUB mem));
6721   predicate(!needs_acquiring_load(n));
6722 
6723   ins_cost(4 * INSN_COST);
6724   format %{ "ldrbw  $dst, $mem\t# byte" %}
6725 
6726   ins_encode(aarch64_enc_ldrb(dst, mem));
6727 
6728   ins_pipe(iload_reg_mem);
6729 %}
6730 
6731 // Load Byte (8 bit unsigned) into long
6732 instruct loadUB2L(iRegLNoSp dst, memory mem)
6733 %{
6734   match(Set dst (ConvI2L (LoadUB mem)));
6735   predicate(!needs_acquiring_load(n->in(1)));
6736 
6737   ins_cost(4 * INSN_COST);
6738   format %{ "ldrb  $dst, $mem\t# byte" %}
6739 
6740   ins_encode(aarch64_enc_ldrb(dst, mem));
6741 
6742   ins_pipe(iload_reg_mem);
6743 %}
6744 
6745 // Load Short (16 bit signed)
6746 instruct loadS(iRegINoSp dst, memory mem)
6747 %{
6748   match(Set dst (LoadS mem));
6749   predicate(!needs_acquiring_load(n));
6750 
6751   ins_cost(4 * INSN_COST);
6752   format %{ "ldrshw  $dst, $mem\t# short" %}
6753 
6754   ins_encode(aarch64_enc_ldrshw(dst, mem));
6755 
6756   ins_pipe(iload_reg_mem);
6757 %}
6758 
6759 // Load Short (16 bit signed) into long
6760 instruct loadS2L(iRegLNoSp dst, memory mem)
6761 %{
6762   match(Set dst (ConvI2L (LoadS mem)));
6763   predicate(!needs_acquiring_load(n->in(1)));
6764 
6765   ins_cost(4 * INSN_COST);
6766   format %{ "ldrsh  $dst, $mem\t# short" %}
6767 
6768   ins_encode(aarch64_enc_ldrsh(dst, mem));
6769 
6770   ins_pipe(iload_reg_mem);
6771 %}
6772 
6773 // Load Char (16 bit unsigned)
6774 instruct loadUS(iRegINoSp dst, memory mem)
6775 %{
6776   match(Set dst (LoadUS mem));
6777   predicate(!needs_acquiring_load(n));
6778 
6779   ins_cost(4 * INSN_COST);
6780   format %{ "ldrh  $dst, $mem\t# short" %}
6781 
6782   ins_encode(aarch64_enc_ldrh(dst, mem));
6783 
6784   ins_pipe(iload_reg_mem);
6785 %}
6786 
6787 // Load Short/Char (16 bit unsigned) into long
6788 instruct loadUS2L(iRegLNoSp dst, memory mem)
6789 %{
6790   match(Set dst (ConvI2L (LoadUS mem)));
6791   predicate(!needs_acquiring_load(n->in(1)));
6792 
6793   ins_cost(4 * INSN_COST);
6794   format %{ "ldrh  $dst, $mem\t# short" %}
6795 
6796   ins_encode(aarch64_enc_ldrh(dst, mem));
6797 
6798   ins_pipe(iload_reg_mem);
6799 %}
6800 
6801 // Load Integer (32 bit signed)
6802 instruct loadI(iRegINoSp dst, memory mem)
6803 %{
6804   match(Set dst (LoadI mem));
6805   predicate(!needs_acquiring_load(n));
6806 
6807   ins_cost(4 * INSN_COST);
6808   format %{ "ldrw  $dst, $mem\t# int" %}
6809 
6810   ins_encode(aarch64_enc_ldrw(dst, mem));
6811 
6812   ins_pipe(iload_reg_mem);
6813 %}
6814 
6815 // Load Integer (32 bit signed) into long
6816 instruct loadI2L(iRegLNoSp dst, memory mem)
6817 %{
6818   match(Set dst (ConvI2L (LoadI mem)));
6819   predicate(!needs_acquiring_load(n->in(1)));
6820 
6821   ins_cost(4 * INSN_COST);
6822   format %{ "ldrsw  $dst, $mem\t# int" %}
6823 
6824   ins_encode(aarch64_enc_ldrsw(dst, mem));
6825 
6826   ins_pipe(iload_reg_mem);
6827 %}
6828 
6829 // Load Integer (32 bit unsigned) into long
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  // Matches (LoadI converted to long, then masked with 0xFFFFFFFF):
  // a 32-bit ldrw zero-extends into the 64-bit register, so the AndL
  // mask is free and a single load suffices.
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  // The LoadI sits two levels down the matched tree (under AndL/ConvI2L),
  // hence in(1)->in(1).
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6842 
6843 // Load Long (64 bit signed)
6844 instruct loadL(iRegLNoSp dst, memory mem)
6845 %{
6846   match(Set dst (LoadL mem));
6847   predicate(!needs_acquiring_load(n));
6848 
6849   ins_cost(4 * INSN_COST);
6850   format %{ "ldr  $dst, $mem\t# int" %}
6851 
6852   ins_encode(aarch64_enc_ldr(dst, mem));
6853 
6854   ins_pipe(iload_reg_mem);
6855 %}
6856 
6857 // Load Range
6858 instruct loadRange(iRegINoSp dst, memory mem)
6859 %{
6860   match(Set dst (LoadRange mem));
6861 
6862   ins_cost(4 * INSN_COST);
6863   format %{ "ldrw  $dst, $mem\t# range" %}
6864 
6865   ins_encode(aarch64_enc_ldrw(dst, mem));
6866 
6867   ins_pipe(iload_reg_mem);
6868 %}
6869 
6870 // Load Pointer
6871 instruct loadP(iRegPNoSp dst, memory mem)
6872 %{
6873   match(Set dst (LoadP mem));
6874   predicate(!needs_acquiring_load(n));
6875 
6876   ins_cost(4 * INSN_COST);
6877   format %{ "ldr  $dst, $mem\t# ptr" %}
6878 
6879   ins_encode(aarch64_enc_ldr(dst, mem));
6880 
6881   ins_pipe(iload_reg_mem);
6882 %}
6883 
6884 // Load Compressed Pointer
6885 instruct loadN(iRegNNoSp dst, memory mem)
6886 %{
6887   match(Set dst (LoadN mem));
6888   predicate(!needs_acquiring_load(n));
6889 
6890   ins_cost(4 * INSN_COST);
6891   format %{ "ldrw  $dst, $mem\t# compressed ptr" %}
6892 
6893   ins_encode(aarch64_enc_ldrw(dst, mem));
6894 
6895   ins_pipe(iload_reg_mem);
6896 %}
6897 
6898 // Load Klass Pointer
6899 instruct loadKlass(iRegPNoSp dst, memory mem)
6900 %{
6901   match(Set dst (LoadKlass mem));
6902   predicate(!needs_acquiring_load(n));
6903 
6904   ins_cost(4 * INSN_COST);
6905   format %{ "ldr  $dst, $mem\t# class" %}
6906 
6907   ins_encode(aarch64_enc_ldr(dst, mem));
6908 
6909   ins_pipe(iload_reg_mem);
6910 %}
6911 
6912 // Load Narrow Klass Pointer
6913 instruct loadNKlass(iRegNNoSp dst, memory mem)
6914 %{
6915   match(Set dst (LoadNKlass mem));
6916   predicate(!needs_acquiring_load(n));
6917 
6918   ins_cost(4 * INSN_COST);
6919   format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
6920 
6921   ins_encode(aarch64_enc_ldrw(dst, mem));
6922 
6923   ins_pipe(iload_reg_mem);
6924 %}
6925 
6926 // Load Float
6927 instruct loadF(vRegF dst, memory mem)
6928 %{
6929   match(Set dst (LoadF mem));
6930   predicate(!needs_acquiring_load(n));
6931 
6932   ins_cost(4 * INSN_COST);
6933   format %{ "ldrs  $dst, $mem\t# float" %}
6934 
6935   ins_encode( aarch64_enc_ldrs(dst, mem) );
6936 
6937   ins_pipe(pipe_class_memory);
6938 %}
6939 
6940 // Load Double
6941 instruct loadD(vRegD dst, memory mem)
6942 %{
6943   match(Set dst (LoadD mem));
6944   predicate(!needs_acquiring_load(n));
6945 
6946   ins_cost(4 * INSN_COST);
6947   format %{ "ldrd  $dst, $mem\t# double" %}
6948 
6949   ins_encode( aarch64_enc_ldrd(dst, mem) );
6950 
6951   ins_pipe(pipe_class_memory);
6952 %}
6953 
6954 
6955 // Load Int Constant
6956 instruct loadConI(iRegINoSp dst, immI src)
6957 %{
6958   match(Set dst src);
6959 
6960   ins_cost(INSN_COST);
6961   format %{ "mov $dst, $src\t# int" %}
6962 
6963   ins_encode( aarch64_enc_movw_imm(dst, src) );
6964 
6965   ins_pipe(ialu_imm);
6966 %}
6967 
6968 // Load Long Constant
6969 instruct loadConL(iRegLNoSp dst, immL src)
6970 %{
6971   match(Set dst src);
6972 
6973   ins_cost(INSN_COST);
6974   format %{ "mov $dst, $src\t# long" %}
6975 
6976   ins_encode( aarch64_enc_mov_imm(dst, src) );
6977 
6978   ins_pipe(ialu_imm);
6979 %}
6980 
6981 // Load Pointer Constant
6982 
6983 instruct loadConP(iRegPNoSp dst, immP con)
6984 %{
6985   match(Set dst con);
6986 
6987   ins_cost(INSN_COST * 4);
6988   format %{
6989     "mov  $dst, $con\t# ptr\n\t"
6990   %}
6991 
6992   ins_encode(aarch64_enc_mov_p(dst, con));
6993 
6994   ins_pipe(ialu_imm);
6995 %}
6996 
6997 // Load Null Pointer Constant
6998 
6999 instruct loadConP0(iRegPNoSp dst, immP0 con)
7000 %{
7001   match(Set dst con);
7002 
7003   ins_cost(INSN_COST);
7004   format %{ "mov  $dst, $con\t# NULL ptr" %}
7005 
7006   ins_encode(aarch64_enc_mov_p0(dst, con));
7007 
7008   ins_pipe(ialu_imm);
7009 %}
7010 
7011 // Load Pointer Constant One
7012 
// Materialize the constant pointer value 1 (immP_1).
instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Fix: the format previously said "NULL ptr" — a copy-paste from
  // loadConP0 above; this instruction loads the pointer constant one.
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
7024 
7025 // Load Poll Page Constant
7026 
7027 instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
7028 %{
7029   match(Set dst con);
7030 
7031   ins_cost(INSN_COST);
7032   format %{ "adr  $dst, $con\t# Poll Page Ptr" %}
7033 
7034   ins_encode(aarch64_enc_mov_poll_page(dst, con));
7035 
7036   ins_pipe(ialu_imm);
7037 %}
7038 
7039 // Load Byte Map Base Constant
7040 
7041 instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
7042 %{
7043   match(Set dst con);
7044 
7045   ins_cost(INSN_COST);
7046   format %{ "adr  $dst, $con\t# Byte Map Base" %}
7047 
7048   ins_encode(aarch64_enc_mov_byte_map_base(dst, con));
7049 
7050   ins_pipe(ialu_imm);
7051 %}
7052 
7053 // Load Narrow Pointer Constant
7054 
7055 instruct loadConN(iRegNNoSp dst, immN con)
7056 %{
7057   match(Set dst con);
7058 
7059   ins_cost(INSN_COST * 4);
7060   format %{ "mov  $dst, $con\t# compressed ptr" %}
7061 
7062   ins_encode(aarch64_enc_mov_n(dst, con));
7063 
7064   ins_pipe(ialu_imm);
7065 %}
7066 
7067 // Load Narrow Null Pointer Constant
7068 
7069 instruct loadConN0(iRegNNoSp dst, immN0 con)
7070 %{
7071   match(Set dst con);
7072 
7073   ins_cost(INSN_COST);
7074   format %{ "mov  $dst, $con\t# compressed NULL ptr" %}
7075 
7076   ins_encode(aarch64_enc_mov_n0(dst, con));
7077 
7078   ins_pipe(ialu_imm);
7079 %}
7080 
7081 // Load Narrow Klass Constant
7082 
7083 instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
7084 %{
7085   match(Set dst con);
7086 
7087   ins_cost(INSN_COST);
7088   format %{ "mov  $dst, $con\t# compressed klass ptr" %}
7089 
7090   ins_encode(aarch64_enc_mov_nk(dst, con));
7091 
7092   ins_pipe(ialu_imm);
7093 %}
7094 
7095 // Load Packed Float Constant
7096 
7097 instruct loadConF_packed(vRegF dst, immFPacked con) %{
7098   match(Set dst con);
7099   ins_cost(INSN_COST * 4);
7100   format %{ "fmovs  $dst, $con"%}
7101   ins_encode %{
7102     __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
7103   %}
7104 
7105   ins_pipe(pipe_class_default);
7106 %}
7107 
7108 // Load Float Constant
7109 
7110 instruct loadConF(vRegF dst, immF con) %{
7111   match(Set dst con);
7112 
7113   ins_cost(INSN_COST * 4);
7114 
7115   format %{
7116     "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
7117   %}
7118 
7119   ins_encode %{
7120     __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
7121   %}
7122 
7123   ins_pipe(pipe_class_default);
7124 %}
7125 
7126 // Load Packed Double Constant
7127 
7128 instruct loadConD_packed(vRegD dst, immDPacked con) %{
7129   match(Set dst con);
7130   ins_cost(INSN_COST);
7131   format %{ "fmovd  $dst, $con"%}
7132   ins_encode %{
7133     __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
7134   %}
7135 
7136   ins_pipe(pipe_class_default);
7137 %}
7138 
7139 // Load Double Constant
7140 
// Load a double constant from the constant table.
instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  // Fix: the disassembly comment previously said "float=$con" — this
  // instruction loads a double constant (ldrd).
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(pipe_class_default);
%}
7155 
7156 // Store Instructions
7157 
7158 // Store CMS card-mark Immediate
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  // Selected only when an extra StoreStore barrier is known to be
  // unnecessary for this card mark; otherwise the _ordered variant
  // below (which emits "dmb ishst" first) is used instead.
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  // Stores the zero register directly; no scratch register needed.
  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
7171 
7172 // Store CMS card-mark Immediate with intervening StoreStore
7173 // needed when using CMS with no conditional card marking
7174 instruct storeimmCM0_ordered(immI0 zero, memory mem)
7175 %{
7176   match(Set mem (StoreCM mem zero));
7177 
7178   ins_cost(INSN_COST * 2);
7179   format %{ "dmb ishst"
7180       "\n\tstrb zr, $mem\t# byte" %}
7181 
7182   ins_encode(aarch64_enc_strb0_ordered(mem));
7183 
7184   ins_pipe(istore_mem);
7185 %}
7186 
7187 // Store Byte
7188 instruct storeB(iRegIorL2I src, memory mem)
7189 %{
7190   match(Set mem (StoreB mem src));
7191   predicate(!needs_releasing_store(n));
7192 
7193   ins_cost(INSN_COST);
7194   format %{ "strb  $src, $mem\t# byte" %}
7195 
7196   ins_encode(aarch64_enc_strb(src, mem));
7197 
7198   ins_pipe(istore_reg_mem);
7199 %}
7200 
7201 
// Store Zero Byte
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fix: format previously printed the misspelled "rscractch2"; the
  // encoding (aarch64_enc_strb0) stores the zero register, so print zr —
  // consistent with storeimmC0/storeimmI0/storeimmL0 below.
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
7214 
7215 // Store Char/Short
7216 instruct storeC(iRegIorL2I src, memory mem)
7217 %{
7218   match(Set mem (StoreC mem src));
7219   predicate(!needs_releasing_store(n));
7220 
7221   ins_cost(INSN_COST);
7222   format %{ "strh  $src, $mem\t# short" %}
7223 
7224   ins_encode(aarch64_enc_strh(src, mem));
7225 
7226   ins_pipe(istore_reg_mem);
7227 %}
7228 
7229 instruct storeimmC0(immI0 zero, memory mem)
7230 %{
7231   match(Set mem (StoreC mem zero));
7232   predicate(!needs_releasing_store(n));
7233 
7234   ins_cost(INSN_COST);
7235   format %{ "strh  zr, $mem\t# short" %}
7236 
7237   ins_encode(aarch64_enc_strh0(mem));
7238 
7239   ins_pipe(istore_mem);
7240 %}
7241 
7242 // Store Integer
7243 
7244 instruct storeI(iRegIorL2I src, memory mem)
7245 %{
7246   match(Set mem(StoreI mem src));
7247   predicate(!needs_releasing_store(n));
7248 
7249   ins_cost(INSN_COST);
7250   format %{ "strw  $src, $mem\t# int" %}
7251 
7252   ins_encode(aarch64_enc_strw(src, mem));
7253 
7254   ins_pipe(istore_reg_mem);
7255 %}
7256 
7257 instruct storeimmI0(immI0 zero, memory mem)
7258 %{
7259   match(Set mem(StoreI mem zero));
7260   predicate(!needs_releasing_store(n));
7261 
7262   ins_cost(INSN_COST);
7263   format %{ "strw  zr, $mem\t# int" %}
7264 
7265   ins_encode(aarch64_enc_strw0(mem));
7266 
7267   ins_pipe(istore_mem);
7268 %}
7269 
7270 // Store Long (64 bit signed)
7271 instruct storeL(iRegL src, memory mem)
7272 %{
7273   match(Set mem (StoreL mem src));
7274   predicate(!needs_releasing_store(n));
7275 
7276   ins_cost(INSN_COST);
7277   format %{ "str  $src, $mem\t# int" %}
7278 
7279   ins_encode(aarch64_enc_str(src, mem));
7280 
7281   ins_pipe(istore_reg_mem);
7282 %}
7283 
7284 // Store Long (64 bit signed)
7285 instruct storeimmL0(immL0 zero, memory mem)
7286 %{
7287   match(Set mem (StoreL mem zero));
7288   predicate(!needs_releasing_store(n));
7289 
7290   ins_cost(INSN_COST);
7291   format %{ "str  zr, $mem\t# int" %}
7292 
7293   ins_encode(aarch64_enc_str0(mem));
7294 
7295   ins_pipe(istore_mem);
7296 %}
7297 
7298 // Store Pointer
7299 instruct storeP(iRegP src, memory mem)
7300 %{
7301   match(Set mem (StoreP mem src));
7302   predicate(!needs_releasing_store(n));
7303 
7304   ins_cost(INSN_COST);
7305   format %{ "str  $src, $mem\t# ptr" %}
7306 
7307   ins_encode(aarch64_enc_str(src, mem));
7308 
7309   ins_pipe(istore_reg_mem);
7310 %}
7311 
7312 // Store Pointer
7313 instruct storeimmP0(immP0 zero, memory mem)
7314 %{
7315   match(Set mem (StoreP mem zero));
7316   predicate(!needs_releasing_store(n));
7317 
7318   ins_cost(INSN_COST);
7319   format %{ "str zr, $mem\t# ptr" %}
7320 
7321   ins_encode(aarch64_enc_str0(mem));
7322 
7323   ins_pipe(istore_mem);
7324 %}
7325 
7326 // Store Compressed Pointer
7327 instruct storeN(iRegN src, memory mem)
7328 %{
7329   match(Set mem (StoreN mem src));
7330   predicate(!needs_releasing_store(n));
7331 
7332   ins_cost(INSN_COST);
7333   format %{ "strw  $src, $mem\t# compressed ptr" %}
7334 
7335   ins_encode(aarch64_enc_strw(src, mem));
7336 
7337   ins_pipe(istore_reg_mem);
7338 %}
7339 
// Store a compressed NULL by storing rheapbase, which is guaranteed to
// hold zero when neither a narrow oop base nor a narrow klass base is
// in use — saves materializing zero in a scratch register.
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  // Valid only when rheapbase is known to be zero (no compressed-oop or
  // compressed-klass base) and no store-release semantics are required.
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  // Reuses the plain strw encoding with rheapbase as the source register.
  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}
7354 
7355 // Store Float
7356 instruct storeF(vRegF src, memory mem)
7357 %{
7358   match(Set mem (StoreF mem src));
7359   predicate(!needs_releasing_store(n));
7360 
7361   ins_cost(INSN_COST);
7362   format %{ "strs  $src, $mem\t# float" %}
7363 
7364   ins_encode( aarch64_enc_strs(src, mem) );
7365 
7366   ins_pipe(pipe_class_memory);
7367 %}
7368 
7369 // TODO
7370 // implement storeImmF0 and storeFImmPacked
7371 
7372 // Store Double
7373 instruct storeD(vRegD src, memory mem)
7374 %{
7375   match(Set mem (StoreD mem src));
7376   predicate(!needs_releasing_store(n));
7377 
7378   ins_cost(INSN_COST);
7379   format %{ "strd  $src, $mem\t# double" %}
7380 
7381   ins_encode( aarch64_enc_strd(src, mem) );
7382 
7383   ins_pipe(pipe_class_memory);
7384 %}
7385 
7386 // Store Compressed Klass Pointer
7387 instruct storeNKlass(iRegN src, memory mem)
7388 %{
7389   predicate(!needs_releasing_store(n));
7390   match(Set mem (StoreNKlass mem src));
7391 
7392   ins_cost(INSN_COST);
7393   format %{ "strw  $src, $mem\t# compressed klass ptr" %}
7394 
7395   ins_encode(aarch64_enc_strw(src, mem));
7396 
7397   ins_pipe(istore_reg_mem);
7398 %}
7399 
7400 // TODO
7401 // implement storeImmD0 and storeDImmPacked
7402 
7403 // prefetch instructions
7404 // Must be safe to execute with invalid address (cannot fault).
7405 
7406 instruct prefetchalloc( memory mem ) %{
7407   match(PrefetchAllocation mem);
7408 
7409   ins_cost(INSN_COST);
7410   format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}
7411 
7412   ins_encode( aarch64_enc_prefetchw(mem) );
7413 
7414   ins_pipe(iload_prefetch);
7415 %}
7416 
7417 //  ---------------- volatile loads and stores ----------------
7418 
7419 // Load Byte (8 bit signed)
7420 instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7421 %{
7422   match(Set dst (LoadB mem));
7423 
7424   ins_cost(VOLATILE_REF_COST);
7425   format %{ "ldarsb  $dst, $mem\t# byte" %}
7426 
7427   ins_encode(aarch64_enc_ldarsb(dst, mem));
7428 
7429   ins_pipe(pipe_serial);
7430 %}
7431 
7432 // Load Byte (8 bit signed) into long
7433 instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7434 %{
7435   match(Set dst (ConvI2L (LoadB mem)));
7436 
7437   ins_cost(VOLATILE_REF_COST);
7438   format %{ "ldarsb  $dst, $mem\t# byte" %}
7439 
7440   ins_encode(aarch64_enc_ldarsb(dst, mem));
7441 
7442   ins_pipe(pipe_serial);
7443 %}
7444 
7445 // Load Byte (8 bit unsigned)
7446 instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7447 %{
7448   match(Set dst (LoadUB mem));
7449 
7450   ins_cost(VOLATILE_REF_COST);
7451   format %{ "ldarb  $dst, $mem\t# byte" %}
7452 
7453   ins_encode(aarch64_enc_ldarb(dst, mem));
7454 
7455   ins_pipe(pipe_serial);
7456 %}
7457 
7458 // Load Byte (8 bit unsigned) into long
7459 instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7460 %{
7461   match(Set dst (ConvI2L (LoadUB mem)));
7462 
7463   ins_cost(VOLATILE_REF_COST);
7464   format %{ "ldarb  $dst, $mem\t# byte" %}
7465 
7466   ins_encode(aarch64_enc_ldarb(dst, mem));
7467 
7468   ins_pipe(pipe_serial);
7469 %}
7470 
7471 // Load Short (16 bit signed)
7472 instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7473 %{
7474   match(Set dst (LoadS mem));
7475 
7476   ins_cost(VOLATILE_REF_COST);
7477   format %{ "ldarshw  $dst, $mem\t# short" %}
7478 
7479   ins_encode(aarch64_enc_ldarshw(dst, mem));
7480 
7481   ins_pipe(pipe_serial);
7482 %}
7483 
7484 instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7485 %{
7486   match(Set dst (LoadUS mem));
7487 
7488   ins_cost(VOLATILE_REF_COST);
7489   format %{ "ldarhw  $dst, $mem\t# short" %}
7490 
7491   ins_encode(aarch64_enc_ldarhw(dst, mem));
7492 
7493   ins_pipe(pipe_serial);
7494 %}
7495 
7496 // Load Short/Char (16 bit unsigned) into long
7497 instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7498 %{
7499   match(Set dst (ConvI2L (LoadUS mem)));
7500 
7501   ins_cost(VOLATILE_REF_COST);
7502   format %{ "ldarh  $dst, $mem\t# short" %}
7503 
7504   ins_encode(aarch64_enc_ldarh(dst, mem));
7505 
7506   ins_pipe(pipe_serial);
7507 %}
7508 
7509 // Load Short/Char (16 bit signed) into long
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Fix: format previously said "ldarh", but the encoding below emits
  // ldarsh (sign-extending load-acquire halfword) to match LoadS.
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
7520 %}
7521 
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// The AndL with the 0xFFFFFFFF mask is folded into the match: ldarw
// already zero-extends into the 64 bit destination register.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
7547 
// Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // Fixed: the trailing comment previously said "# int"; this rule
  // performs a 64 bit load-acquire of a long.
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
7560 
// Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
// "ldars" is a pseudo-mnemonic: there is no FP load-acquire, so the
// encoding presumably acquires through a general register and moves to
// the FP register -- see aarch64_enc_fldars.
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
// "ldard" is a pseudo-mnemonic, as for loadF_volatile above.
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
7612 
// ---- Volatile (releasing) stores ----
// Each rule below uses a store-release (stlr-family) instruction, so no
// trailing membar is required for release semantics. As with the
// volatile loads, the address must be a plain indirect base register.

// Store Byte
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}
7638 
// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  // Fixed: added the space between "mem" and "(StoreI ...)" for
  // consistency with every other volatile store rule in this section.
  match(Set mem (StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
7652 
// Store Long (64 bit signed)
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  // Fixed: the trailing comment previously said "# int"; this rule
  // performs a 64 bit store-release of a long.
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
7665 
// Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float
// "stlrs" is a pseudo-mnemonic: there is no FP store-release, so the
// encoding presumably moves the value to a general register and uses
// stlrw -- see aarch64_enc_fstlrs.
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
// "stlrd" is a pseudo-mnemonic, as for storeF_volatile above.
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
7720 
7721 //  ---------------- end of volatile loads and stores ----------------
7722 
7723 // ============================================================================
7724 // BSWAP Instructions
7725 
// Reverse the byte order of a 32 bit int (Integer.reverseBytes).
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Reverse the byte order of a 64 bit long (Long.reverseBytes).
instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Reverse the byte order of an unsigned 16 bit value (Character).
instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Reverse the byte order of a signed 16 bit value (Short.reverseBytes).
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    // sbfmw #0,#15 sign-extends the reversed low 16 bits into 32 bits.
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
7779 
7780 // ============================================================================
7781 // Zero Count Instructions
7782 
// Count leading zeros of a 32 bit int.
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count leading zeros of a 64 bit long (result is an int).
instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count trailing zeros: bit-reverse first, then count leading zeros.
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count trailing zeros of a long, same rbit + clz idiom as above.
instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
7834 
7835 //---------- Population Count Instructions -------------------------------------
7836 //
7837 
// Population count of a 32 bit int using the SIMD cnt/addv pipeline:
// move to an FP/SIMD register, count bits per byte (cnt), sum the eight
// byte counts (addv), move the result back to a general register.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): this writes $src, but $src is not declared as
    // USE_KILL/TEMP in the effect() clause -- confirm the register
    // allocator is told about the clobber (the write zeroes the upper
    // 32 bits of the register in place).
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// PopCountI with the operand loaded directly from memory into the SIMD
// register (ldrs), skipping the general-register round trip.
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
7881 
7882 // Note: Long.bitCount(long) returns an int.
7883 instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
7884   predicate(UsePopCountInstruction);
7885   match(Set dst (PopCountL src));
7886   effect(TEMP tmp);
7887   ins_cost(INSN_COST * 13);
7888 
7889   format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
7890             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
7891             "addv   $tmp, $tmp\t# vector (8B)\n\t"
7892             "mov    $dst, $tmp\t# vector (1D)" %}
7893   ins_encode %{
7894     __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
7895     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7896     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7897     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
7898   %}
7899 
7900   ins_pipe(pipe_class_default);
7901 %}
7902 
7903 instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
7904   predicate(UsePopCountInstruction);
7905   match(Set dst (PopCountL (LoadL mem)));
7906   effect(TEMP tmp);
7907   ins_cost(INSN_COST * 13);
7908 
7909   format %{ "ldrd   $tmp, $mem\n\t"
7910             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
7911             "addv   $tmp, $tmp\t# vector (8B)\n\t"
7912             "mov    $dst, $tmp\t# vector (1D)" %}
7913   ins_encode %{
7914     FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
7915     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
7916                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
7917     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7918     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7919     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
7920   %}
7921 
7922   ins_pipe(pipe_class_default);
7923 %}
7924 
7925 // ============================================================================
7926 // MemBar Instruction
7927 
// LoadFence: orders prior loads before subsequent loads and stores.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}

// Acquire barrier elided at zero cost when unnecessary_acquire(n)
// determines it is redundant -- presumably because the preceding
// volatile load was already emitted with acquire semantics (ldar).
// Only a block comment is emitted.
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}

// Full acquire barrier: LoadLoad|LoadStore.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}


// Lock-acquire barrier: always elided (only a block comment is emitted).
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// StoreFence: orders prior loads and stores before subsequent stores.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Release barrier elided at zero cost when unnecessary_release(n)
// determines it is redundant -- presumably because the following
// volatile store will be emitted with release semantics (stlr).
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}

// Full release barrier: LoadStore|StoreStore.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release" %}

  ins_encode %{
    __ block_comment("membar_release");
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Store-store barrier only.
instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Lock-release barrier: always elided (only a block comment is emitted).
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Volatile barrier elided at zero cost when unnecessary_volatile(n)
// shows the surrounding code already provides the required ordering.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Full StoreLoad barrier -- the most expensive ordering; costed far
// above the other barriers so elidable alternatives are preferred.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile" %}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
8072 
8073 // ============================================================================
8074 // Cast/Convert Instructions
8075 
// Reinterpret a long as a pointer. A register move is emitted only when
// the allocator assigned different registers.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Reinterpret a pointer as a long; move elided when registers coincide.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Convert oop into int for vectors alignment masking
// (movw also truncates the 64 bit pointer to its low 32 bits).
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8118 
// Convert compressed oop into int for vectors alignment masking
// in case of 32bit oops (heap < 4Gb).
// Only valid when narrow_oop_shift() == 0, i.e. the compressed form is
// the raw low 32 bits of the address.
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Fixed: the format previously read "mov dst, $src" -- the "$" was
  // missing from dst (so the operand was never substituted in debug
  // output) and the mnemonic did not match the movw the encoding emits.
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8134 
8135 
8136 // Convert oop pointer into compressed form
// Convert oop pointer into compressed form
// (may-be-null path: encode_heap_oop handles a null source; flags are
// declared killed).
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Known-not-null variant: no null check needed.
// NOTE(review): cr is a formal operand here but there is no
// effect(KILL cr) clause -- presumably encode_heap_oop_not_null leaves
// the flags untouched; confirm against the macro assembler.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop that may be null (and is not a constant).
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop known to be non-null (or a constant).
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
8189 
8190 // n.b. AArch64 implementations of encode_klass_not_null and
8191 // decode_klass_not_null do not modify the flags register so, unlike
8192 // Intel, we don't kill CR as a side effect here
8193 
// Compress a Klass pointer (never null; flags are not clobbered -- see
// the note above).
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Decompress a narrow Klass pointer; the single-register form of
// decode_klass_not_null is used when src and dst coincide.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
8227 
// The three cast rules below emit no code (size(0), empty encoding):
// they exist only to narrow types in the ideal graph.

instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
8258 
8259 // ============================================================================
8260 // Atomic operation instructions
8261 //
8262 // Intel and SPARC both implement Ideal Node LoadPLocked and
8263 // Store{PIL}Conditional instructions using a normal load for the
8264 // LoadPLocked and a CAS for the Store{PIL}Conditional.
8265 //
8266 // The ideal code appears only to use LoadPLocked/StorePLocked as a
8267 // pair to lock object allocations from Eden space when not using
8268 // TLABs.
8269 //
8270 // There does not appear to be a Load{IL}Locked Ideal Node and the
8271 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
8272 // and to use StoreIConditional only for 32-bit and StoreLConditional
8273 // only for 64-bit.
8274 //
8275 // We implement LoadPLocked and StorePLocked instructions using,
8276 // respectively the AArch64 hw load-exclusive and store-conditional
8277 // instructions. Whereas we must implement each of
8278 // Store{IL}Conditional using a CAS which employs a pair of
8279 // instructions comprising a load-exclusive followed by a
8280 // store-conditional.
8281 
8282 
8283 // Locked-load (linked load) of the current heap-top
8284 // used when updating the eden heap top
8285 // implemented using ldaxr on AArch64
8286 
// Linked (exclusive) load of a pointer with acquire semantics, paired
// with storePConditional below.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
8299 
8300 // Conditional-store of the updated heap-top.
8301 // Used during allocation of the shared heap.
8302 // Sets flag (EQ) on success.
8303 // implemented using stlxr on AArch64.
8304 
// Store-conditional paired with the loadPLocked above. oldval is unused
// by the encoding: the preceding ldaxr established the exclusive
// monitor, so only the stlxr result (0 on success) matters. The flags
// are set so EQ indicates success.
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
8324 
8325 
8326 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
8327 // when attempting to rebias a lock towards the current thread.  We
8328 // must use the acquire form of cmpxchg in order to guarantee acquire
8329 // semantics in this case.
// storeLConditional is used by PhaseMacroExpand::expand_lock_node
// when attempting to rebias a lock towards the current thread.  We
// must use the acquire form of cmpxchg in order to guarantee acquire
// semantics in this case.
// Result is delivered in the flags: EQ on successful write.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}

// storeIConditional also has acquire semantics, for no better reason
// than matching storeLConditional.  At the time of writing this
// comment storeIConditional was not used anywhere by AArch64.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8364 
8365 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
8366 // can't match them
8367 
// CompareAndSwap: each rule emits a cmpxchg (load-exclusive /
// store-conditional loop in the encoding) followed by cset to
// materialize the boolean success result; the condition flags are
// clobbered and declared KILLed.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8435 
8436 
// Atomic exchange: prev receives the old memory value, newv is stored.
// Only the base register of mem is used (indirect addressing).

instruct get_and_setI(indirect mem, iRegINoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetI mem newv));
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setL(indirect mem, iRegLNoSp newv, iRegL prev) %{
  match(Set prev (GetAndSetL mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Narrow oop exchange uses the 32 bit form.
instruct get_and_setN(indirect mem, iRegNNoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetN mem newv));
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setP(indirect mem, iRegPNoSp newv, iRegP prev) %{
  match(Set prev (GetAndSetP mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8472 
8473 
// Atomic fetch-and-add. Variants: register vs. immediate increment, and
// a "_no_res" form (matched when the result is unused, see the
// predicate) that passes noreg and is costed slightly cheaper so it is
// preferred.

instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8557 
8558 // Manifest a CmpL result in an integer register.
8559 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Three-instruction sequence: cmp sets NZCV; csetw writes 0 (EQ) or 1 (NE);
// cnegw then negates the result when the compare was LT, giving -1/0/+1.
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  // format %{ "CmpL3 $dst, $src1, $src2" %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}

// CmpL3 against an add/sub-encodable immediate. A negative constant cannot
// be encoded in subs directly, so the compare is done as adds with -con
// (which fits the immediate field when con does); otherwise subs is used.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    int32_t con = (int32_t)$src2$$constant;
     if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
8605 
8606 // ============================================================================
8607 // Conditional Move Instructions
8608 
8609 // n.b. we have identical rules for both a signed compare op (cmpOp)
8610 // and an unsigned compare op (cmpOpU). it would be nice if we could
8611 // define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
8613 // opclass does not live up to the COND_INTER interface of its
8614 // component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
8616 // which throws a ShouldNotHappen. So, we have to provide two flavours
8617 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
8618 
// CMoveI, signed compare. Note the operand order: CSEL Rd, Rn, Rm, cond
// yields Rn when cond holds, so src2 is the value selected when $cmp is true.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// CMoveI, unsigned compare flavour (see the cmpOp/cmpOpU note above).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// n.b. this is selected in preference to the rule above because it
// avoids loading constant 0 into a source register

// TODO
// we ought only to be able to cull one of these variants as the ideal
// transforms ought always to order the zero consistently (to left/right?)

// CMoveI with zero as the not-taken value: zr replaces the source register.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of cmovI_zero_reg.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// CMoveI with zero as the taken value: zr is selected when $cmp holds.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of cmovI_reg_zero.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8723 
8724 // special case for creating a boolean 0 or 1
8725 
8726 // n.b. this is selected in preference to the rule above because it
8727 // avoids loading constants 0 and 1 into a source register
8728 
// Boolean materialization: CSINCW dst, zr, zr, cond gives 0 when cond holds
// and zr+1 = 1 otherwise, i.e. cset with the negated condition.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// Unsigned-compare flavour of cmovI_reg_zero_one.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
8766 
// CMoveL, signed compare; 64-bit csel, src2 selected when $cmp holds.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// CMoveL, unsigned compare flavour.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// CMoveL with zero as the taken value.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of cmovL_reg_zero.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// CMoveL with zero as the not-taken value.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of cmovL_zero_reg.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8864 
// CMoveP, signed compare; pointers are 64-bit so plain csel is used.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// CMoveP, unsigned compare flavour.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// CMoveP with null as the taken value.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of cmovP_reg_zero.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// CMoveP with null as the not-taken value.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of cmovP_zero_reg.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8962 
// CMoveN, signed compare; compressed oops are 32-bit so cselw is used.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8978 
// CMoveN, unsigned compare flavour (cmpOpU/rFlagsRegU); compressed oops
// are 32-bit so cselw is used. The format string previously said
// "signed" — corrected to "unsigned" to match the operands and the other
// unsigned cmov rules (cf. cmovUI_reg_reg, cmovUN_reg_zero).
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8994 
8995 // special cases where one arg is zero
8996 
// CMoveN with compressed null as the taken value.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of cmovN_reg_zero.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// CMoveN with compressed null as the not-taken value.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of cmovN_zero_reg.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9060 
// CMoveF, signed compare. As with csel, fcsels selects its second operand
// (here src2) when the condition holds.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}

// CMoveF, unsigned compare flavour.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
9096 
// CMoveD, signed compare; fcseld selects src2 when the condition holds.
// The format string previously said "cmove float" — corrected to "double"
// to match the fcseld mnemonic and the CMoveD match rule.
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
9114 
// CMoveD, unsigned compare flavour. The format string previously said
// "cmove float" — corrected to "double" to match the fcseld mnemonic and
// the CMoveD match rule.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
9132 
9133 // ============================================================================
9134 // Arithmetic Instructions
9135 //
9136 
9137 // Integer Addition
9138 
9139 // TODO
9140 // these currently employ operations which do not set CR and hence are
9141 // not flagged as killing CR but we would like to isolate the cases
9142 // where we want to set flags from those where we don't. need to work
9143 // out how to do that.
9144 
// 32-bit integer add, register + register.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit integer add, register + add/sub-encodable immediate.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// As addI_reg_imm but the first input is the low word of a long
// (ConvL2I folded into the add).
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
9187 
9188 // Pointer Addition
// Pointer add, register + 64-bit register offset.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer add with the int offset sign-extended in the add itself
// (ConvI2L folded into the sxtw-extended register form).
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer add with a shifted index, folded into a single lea-style
// scaled address computation.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Pointer add with an int index both sign-extended and shifted
// (ConvI2L + LShiftL folded into one sxtw-scaled address).
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// ConvI2L + shift implemented as a single sbfiz (sign-extend then shift
// into position). The width operand is capped at 32 bits; the shift
// amounts are reduced mod 64 as for a 64-bit shift.
// NOTE(review): the cr operand appears unused (no effect(KILL cr)) —
// confirm whether it is required here.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
9263 
9264 // Pointer Immediate Addition
9265 // n.b. this needs to be more expensive than using an indirect memory
9266 // operand
// Pointer add with an add/sub-encodable immediate offset.
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}

// 64-bit add, register + register.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9297 
// Long Immediate Addition. No constant pool entries required.
// 64-bit add, register + add/sub-encodable immediate (no constant pool
// entry needed).
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9312 
9313 // Integer Subtraction
// 32-bit subtract, register - register.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
// 32-bit subtract, register - add/sub-encodable immediate.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// Long Subtraction
// 64-bit subtract, register - register.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9360 
// Long Immediate Subtraction. No constant pool entries required.
// 64-bit subtract, register - add/sub-encodable immediate (no constant
// pool entry needed). The format string previously read "sub$dst" with
// no separator — fixed to "sub $dst" so the disassembly-style output is
// well-formed, matching addL_reg_imm.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9375 
9376 // Integer Negation (special case for sub)
9377 
// 32-bit negate: matches SubI with a zero left operand.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long Negation

// 64-bit negate: matches SubL with a zero left operand.
// NOTE(review): src is typed iRegIorL2I although this is a long negate —
// confirm against the other long rules (subL_reg_reg uses iRegL).
instruct negL_reg(iRegLNoSp dst, iRegIorL2I src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9407 
9408 // Integer Multiply
9409 
// 32-bit multiply.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// 32x32 -> 64-bit signed widening multiply: MulL of two ConvI2L inputs
// collapses to a single smull.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// Long Multiply

// 64-bit multiply (low 64 bits of the product).
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}

// High 64 bits of a signed 64x64 multiply (smulh).
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2, \t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
9472 
9473 // Combined Integer Multiply & Add/Sub
9474 
// 32-bit multiply-add: dst = src3 + src1 * src2, fused into maddw.
// NOTE(review): format prints "madd" but the encoding emits maddw — the
// 64-bit mnemonic is shown for a 32-bit op; confirm if intentional.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// 32-bit multiply-subtract: dst = src3 - src1 * src2, fused into msubw.
// NOTE(review): format prints "msub" but the encoding emits msubw.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// Combined Long Multiply & Add/Sub

// 64-bit multiply-add: dst = src3 + src1 * src2.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// 64-bit multiply-subtract: dst = src3 - src1 * src2.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
9540 
9541 // Integer Divide
9542 
9543 instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
9544   match(Set dst (DivI src1 src2));
9545 
9546   ins_cost(INSN_COST * 19);
9547   format %{ "sdivw  $dst, $src1, $src2" %}
9548 
9549   ins_encode(aarch64_enc_divw(dst, src1, src2));
9550   ins_pipe(idiv_reg_reg);
9551 %}
9552 
// (x >> 31) >>> 31 extracts the int sign bit (result 0 or 1); both shift
// amounts are pinned to 31 by immI_31, so a single LSRW #31 suffices.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
9562 
// Rounding adjustment for int divide-by-power-of-two: dst = src + sign-bit(src),
// i.e. add 1 when src is negative. Emits addw dst, src, src, LSR #31.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  // Fixed format: the emitted instruction uses $src twice (shifted second
  // operand); the old string dropped it.
  format %{ "addw $dst, $src, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
9576 
9577 // Long Divide
9578 
// Signed long divide via the aarch64_enc_div encoding class (defined
// elsewhere in this file).
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
9588 
// (x >> 63) >>> 63 extracts the long sign bit (result 0 or 1); both shift
// amounts are pinned to 63 by immL_63, so a single LSR #63 suffices.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
9598 
// Rounding adjustment for long divide-by-power-of-two: dst = src + sign-bit(src),
// i.e. add 1 when src is negative. Emits add dst, src, src, LSR #63.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immL_63 div1, immL_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  // Fixed format: show the shifted second operand and the LSR notation, to
  // match the emitted instruction and the int variant (div2Round).
  format %{ "add $dst, $src, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
9612 
9613 // Integer Remainder
9614 
// Int remainder: sdivw into rscratch1, then msubw reconstructs
// src1 - (src1/src2)*src2 (see aarch64_enc_modw, defined elsewhere).
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  // Fixed format: the old second line had a stray '(' and no operand
  // separation ("msubw($dst, ...").
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
9625 
9626 // Long Remainder
9627 
// Long remainder: sdiv into rscratch1, then msub reconstructs
// src1 - (src1/src2)*src2 (see aarch64_enc_mod, defined elsewhere).
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Fixed format: add the missing "\t" after the newline (matching modI) and
  // drop the stray '(' from the old "msub($dst, ..." text.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
9638 
9639 // Integer Shifts
9640 
9641 // Shift Left Register
// Int shift left, variable count in a register (lslvw).
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9656 
9657 // Shift Left Immediate
// Int shift left by constant; count masked to 0x1f for Java int-shift
// semantics.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9672 
9673 // Shift Right Logical Register
// Int logical shift right, variable count in a register (lsrvw).
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9688 
9689 // Shift Right Logical Immediate
// Int logical shift right by constant; count masked to 0x1f for Java
// int-shift semantics.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9704 
9705 // Shift Right Arithmetic Register
// Int arithmetic shift right, variable count in a register (asrvw).
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9720 
9721 // Shift Right Arithmetic Immediate
// Int arithmetic shift right by constant; count masked to 0x1f for Java
// int-shift semantics.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9736 
9737 // Combined Int Mask and Right Shift (using UBFM)
9738 // TODO
9739 
9740 // Long Shifts
9741 
9742 // Shift Left Register
// Long shift left, variable count in a register (lslv).
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9757 
9758 // Shift Left Immediate
// Long shift left by constant; count masked to 0x3f for Java long-shift
// semantics.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9773 
9774 // Shift Right Logical Register
// Long logical shift right, variable count in a register (lsrv).
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9789 
9790 // Shift Right Logical Immediate
// Long logical shift right by constant; count masked to 0x3f for Java
// long-shift semantics.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9805 
9806 // A special-case pattern for card table stores.
// Logical right shift of a pointer reinterpreted as raw bits (CastP2X);
// same encoding as the plain long shift above.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9821 
9822 // Shift Right Arithmetic Register
// Long arithmetic shift right, variable count in a register (asrv).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9837 
9838 // Shift Right Arithmetic Immediate
// Long arithmetic shift right by constant; count masked to 0x3f for Java
// long-shift semantics.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9853 
9854 // BEGIN This section of the file is automatically generated. Do not edit --------------
9855 
9856 instruct regL_not_reg(iRegLNoSp dst,
9857                          iRegL src1, immL_M1 m1,
9858                          rFlagsReg cr) %{
9859   match(Set dst (XorL src1 m1));
9860   ins_cost(INSN_COST);
9861   format %{ "eon  $dst, $src1, zr" %}
9862 
9863   ins_encode %{
9864     __ eon(as_Register($dst$$reg),
9865               as_Register($src1$$reg),
9866               zr,
9867               Assembler::LSL, 0);
9868   %}
9869 
9870   ins_pipe(ialu_reg);
9871 %}
9872 instruct regI_not_reg(iRegINoSp dst,
9873                          iRegIorL2I src1, immI_M1 m1,
9874                          rFlagsReg cr) %{
9875   match(Set dst (XorI src1 m1));
9876   ins_cost(INSN_COST);
9877   format %{ "eonw  $dst, $src1, zr" %}
9878 
9879   ins_encode %{
9880     __ eonw(as_Register($dst$$reg),
9881               as_Register($src1$$reg),
9882               zr,
9883               Assembler::LSL, 0);
9884   %}
9885 
9886   ins_pipe(ialu_reg);
9887 %}
9888 
// NOTE(review): machine-generated pattern group (and/or with inverted
// operand matched as BIC/ORN). Regenerate rather than hand-edit.
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
9956 
// NOTE(review): machine-generated pattern group (xor with inverted operand
// matched as EON). Regenerate rather than hand-edit.
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
9990 
// NOTE(review): machine-generated pattern group (AND with shifted-then-
// inverted operand matched as BIC + shifted operand). Regenerate rather
// than hand-edit.
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10098 
// NOTE(review): machine-generated pattern group (XOR with shifted-then-
// inverted operand matched as EON + shifted operand). Regenerate rather
// than hand-edit.
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10206 
// NOTE(review): machine-generated pattern group (OR with shifted-then-
// inverted operand matched as ORN + shifted operand). Regenerate rather
// than hand-edit.
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10314 
// NOTE(review): machine-generated pattern group (AND with shifted second
// operand folded into one instruction). Regenerate rather than hand-edit.
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10428 
// NOTE(review): machine-generated pattern group (XOR with shifted second
// operand folded into one instruction). Regenerate rather than hand-edit.
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10542 
// Or (register, shifted-register) forms.  The immediate shift of src2
// is folded into the orr/orrw shifted-register operand; the shift
// count is masked to the operand width (0x1f / 0x3f) per Java shift
// semantics.

// 32-bit: dst = src1 | (src2 >>> src3), logical shift
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 | (src2 >>> src3), logical shift
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 | (src2 >> src3), arithmetic shift
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 | (src2 >> src3), arithmetic shift
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 | (src2 << src3)
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 | (src2 << src3)
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10656 
// Add (register, shifted-register) forms.  The immediate shift of src2
// is folded into the add/addw shifted-register operand; shift count is
// masked to the operand width (0x1f / 0x3f) per Java shift semantics.

// 32-bit: dst = src1 + (src2 >>> src3), logical shift
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 + (src2 >>> src3), logical shift
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 + (src2 >> src3), arithmetic shift
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 + (src2 >> src3), arithmetic shift
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 + (src2 << src3)
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 + (src2 << src3)
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10770 
// Sub (register, shifted-register) forms.  The immediate shift of src2
// is folded into the sub/subw shifted-register operand; shift count is
// masked to the operand width (0x1f / 0x3f) per Java shift semantics.
// Note the shifted operand is the subtrahend (second operand) only.

// 32-bit: dst = src1 - (src2 >>> src3), logical shift
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 - (src2 >>> src3), logical shift
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 - (src2 >> src3), arithmetic shift
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 - (src2 >> src3), arithmetic shift
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 - (src2 << src3)
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 - (src2 << src3)
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10884 
10885 
10886 
// Shift Left followed by Shift Right (signed, 64-bit).
// This idiom is used by the compiler for the i2b bytecode etc.,
// i.e. (src << lshift) >> rshift, which is a signed bitfield move.
// The SBFM immediates are derived from the two shift counts:
//   immr = (rshift - lshift) mod 64, imms = 63 - lshift
// (see the Arm ARM description of SBFM for the immr/imms encoding).
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right (signed, 32-bit).
// This idiom is used by the compiler for the i2b bytecode etc.
// Same derivation as sbfmL but with a 31-bit field width.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Unsigned Shift Right (64-bit).
// Matches (src << lshift) >>> rshift as a single unsigned bitfield move.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Unsigned Shift Right (32-bit).
// Matches (src << lshift) >>> rshift as a single unsigned bitfield move.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
// Bitfield extract with shift & mask:
// (src >>> rshift) & mask becomes a single ubfx/ubfxw.  The bitmask
// operand types guarantee the mask is a contiguous run of low-order
// one bits, so the field width is exact_log2(mask + 1).

// 32-bit unsigned bitfield extract
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit unsigned bitfield extract
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The I2L conversion is free here: ubfx zero-extends into the full
// 64-bit destination.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11028 
// Rotations by an immediate.
// (src1 << lshift) | (src2 >>> rshift) with lshift + rshift == 0 mod
// the register width is an extract-from-pair: EXTR pulls a register-
// width field out of src1:src2 starting at bit rshift.  The Add
// variants are equivalent because the two shifted values occupy
// disjoint bit positions, so add and or produce the same result.
// When src1 == src2 this is a rotate right by rshift.

// 64-bit or-of-shifts rotation
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit or-of-shifts rotation
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 64-bit add-of-shifts rotation (add == or for disjoint bitfields)
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit add-of-shifts rotation (add == or for disjoint bitfields)
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11090 
11091 
// rol expander (64-bit).
// AArch64 has no variable rotate-left, so rotate left by `shift` is
// implemented as rotate right by the negated shift count (taken mod 64
// by rorv).  NOTE: clobbers rscratch1 to hold the negated count.
instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    // negate the shift count: rol(x, s) == ror(x, -s mod 64)
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
11107 
// rol expander (32-bit).
// Rotate left by `shift` via rotate right by the negated count (taken
// mod 32 by rorvw).  NOTE: clobbers rscratch1.
instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    // negate the shift count: rol(x, s) == ror(x, -s mod 32)
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
11123 
// Match the 64-bit variable rotate-left idiom
// (src << shift) | (src >>> (64 - shift)) and expand to rolL_rReg.
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom written with (0 - shift): shift counts are taken mod 64,
// so (0 - shift) and (64 - shift) shift by the same amount.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}
11141 
// Match the 32-bit variable rotate-left idiom
// (src << shift) | (src >>> (32 - shift)) and expand to rolI_rReg.
// FIX: this rule previously declared 64-bit operand classes
// (iRegLNoSp/iRegL) and expanded to the 64-bit rolL_rReg even though
// it matches a 32-bit OrI tree, so it could never be selected and the
// 32-bit rotate was never emitted.  Use the 32-bit register classes
// and the 32-bit expander (cf. JDK-8154537).
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
11150 
// Same 32-bit rotate-left idiom written with (0 - shift); shift counts
// are taken mod 32, so (0 - shift) rotates by the same amount as
// (32 - shift).
// FIX: was declared with 64-bit operand classes and expanded to
// rolL_rReg, so this OrI rule could never match; use the 32-bit
// register classes and rolI_rReg (cf. JDK-8154537).
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
11159 
// ror expander (64-bit).
// Variable rotate right maps directly onto the RORV instruction.
instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander (32-bit).
// Variable rotate right maps directly onto the RORVW instruction.
instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
11189 
// Match the 64-bit variable rotate-right idiom
// (src >>> shift) | (src << (64 - shift)) and expand to rorL_rReg.
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom written with (0 - shift): shift counts are taken mod 64,
// so (0 - shift) and (64 - shift) shift by the same amount.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}
11207 
// Match the 32-bit variable rotate-right idiom
// (src >>> shift) | (src << (32 - shift)) and expand to rorI_rReg.
// FIX: this rule previously declared 64-bit operand classes
// (iRegLNoSp/iRegL) and expanded to the 64-bit rorL_rReg even though
// it matches a 32-bit OrI tree, so it could never be selected and the
// 32-bit rotate was never emitted.  Use the 32-bit register classes
// and the 32-bit expander (cf. JDK-8154537).
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
11216 
// Same 32-bit rotate-right idiom written with (0 - shift); shift
// counts are taken mod 32, so (0 - shift) rotates by the same amount
// as (32 - shift).
// FIX: was declared with 64-bit operand classes and expanded to
// rorL_rReg, so this OrI rule could never match; use the 32-bit
// register classes and rorI_rReg (cf. JDK-8154537).
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
11225 
// Add/subtract (extended)
// ConvI2L of the second operand is folded into the add/sub as an
// sxtw (sign-extend word) extended-register operand.

instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
11253 
11254 
// Add with an extend folded from the shift-pair idiom: the compiler
// expresses a narrowing extension as (x << k) >> k (signed) or
// (x << k) >>> k (unsigned); these rules fold that pair into the
// add's extended-register operand (sxth/sxtb/uxtb etc.).  The fixed
// immI_NN operand types pin the shift count to the one that selects
// the matching extension width.

// 32-bit add of a sign-extended halfword: (src2 << 16) >> 16
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 32-bit add of a sign-extended byte: (src2 << 24) >> 24
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 32-bit add of a zero-extended byte: (src2 << 24) >>> 24
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add of a sign-extended halfword: (src2 << 48) >> 48
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add of a sign-extended word: (src2 << 32) >> 32
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add of a sign-extended byte: (src2 << 56) >> 56
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add of a zero-extended byte: (src2 << 56) >>> 56
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
11345 
11346 
// Add/subtract with a zero-extend folded from an And-with-mask idiom:
// (src2 & 0xff) is uxtb, (src2 & 0xffff) is uxth, and for longs
// (src2 & 0xffffffff) is uxtw.  The fixed mask operand types
// (immI_255 etc.) select the matching extension width.

// 32-bit: dst = src1 + (src2 & 0xff)
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 32-bit: dst = src1 + (src2 & 0xffff)
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit: dst = src1 + (src2 & 0xff)
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit: dst = src1 + (src2 & 0xffff)
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit: dst = src1 + (src2 & 0xffffffff)
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 32-bit: dst = src1 - (src2 & 0xff)
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 32-bit: dst = src1 - (src2 & 0xffff)
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit: dst = src1 - (src2 & 0xff)
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit: dst = src1 - (src2 & 0xffff)
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit: dst = src1 - (src2 & 0xffffffff)
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
11476 
11477 // END This section of the file is automatically generated. Do not edit --------------
11478 
11479 // ============================================================================
11480 // Floating Point Arithmetic Instructions
11481 
// Single-precision float add; all operands in FP/SIMD registers.
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double-precision float add.
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Single-precision float subtract.
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double-precision float subtract.
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Single-precision float multiply; slightly higher cost than add/sub.
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double-precision float multiply.
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11571 
// We cannot use these fused multiply-add/subtract ops because they don't
// produce the same result as the equivalent separated ops
// (essentially they don't round the intermediate result).  That's a
// shame.  Leaving them here, commented out, in case we can identify
// cases where it is legitimate to use them.
11577 
11578 
11579 // instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
11580 //   match(Set dst (AddF (MulF src1 src2) src3));
11581 
11582 //   format %{ "fmadds   $dst, $src1, $src2, $src3" %}
11583 
11584 //   ins_encode %{
11585 //     __ fmadds(as_FloatRegister($dst$$reg),
11586 //              as_FloatRegister($src1$$reg),
11587 //              as_FloatRegister($src2$$reg),
11588 //              as_FloatRegister($src3$$reg));
11589 //   %}
11590 
11591 //   ins_pipe(pipe_class_default);
11592 // %}
11593 
11594 // instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
11595 //   match(Set dst (AddD (MulD src1 src2) src3));
11596 
11597 //   format %{ "fmaddd   $dst, $src1, $src2, $src3" %}
11598 
11599 //   ins_encode %{
11600 //     __ fmaddd(as_FloatRegister($dst$$reg),
11601 //              as_FloatRegister($src1$$reg),
11602 //              as_FloatRegister($src2$$reg),
11603 //              as_FloatRegister($src3$$reg));
11604 //   %}
11605 
11606 //   ins_pipe(pipe_class_default);
11607 // %}
11608 
11609 // instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
11610 //   match(Set dst (AddF (MulF (NegF src1) src2) src3));
11611 //   match(Set dst (AddF (NegF (MulF src1 src2)) src3));
11612 
11613 //   format %{ "fmsubs   $dst, $src1, $src2, $src3" %}
11614 
11615 //   ins_encode %{
11616 //     __ fmsubs(as_FloatRegister($dst$$reg),
11617 //               as_FloatRegister($src1$$reg),
11618 //               as_FloatRegister($src2$$reg),
11619 //              as_FloatRegister($src3$$reg));
11620 //   %}
11621 
11622 //   ins_pipe(pipe_class_default);
11623 // %}
11624 
11625 // instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
11626 //   match(Set dst (AddD (MulD (NegD src1) src2) src3));
11627 //   match(Set dst (AddD (NegD (MulD src1 src2)) src3));
11628 
11629 //   format %{ "fmsubd   $dst, $src1, $src2, $src3" %}
11630 
11631 //   ins_encode %{
11632 //     __ fmsubd(as_FloatRegister($dst$$reg),
11633 //               as_FloatRegister($src1$$reg),
11634 //               as_FloatRegister($src2$$reg),
11635 //               as_FloatRegister($src3$$reg));
11636 //   %}
11637 
11638 //   ins_pipe(pipe_class_default);
11639 // %}
11640 
11641 // instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
11642 //   match(Set dst (SubF (MulF (NegF src1) src2) src3));
11643 //   match(Set dst (SubF (NegF (MulF src1 src2)) src3));
11644 
11645 //   format %{ "fnmadds  $dst, $src1, $src2, $src3" %}
11646 
11647 //   ins_encode %{
11648 //     __ fnmadds(as_FloatRegister($dst$$reg),
11649 //                as_FloatRegister($src1$$reg),
11650 //                as_FloatRegister($src2$$reg),
11651 //                as_FloatRegister($src3$$reg));
11652 //   %}
11653 
11654 //   ins_pipe(pipe_class_default);
11655 // %}
11656 
11657 // instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
11658 //   match(Set dst (SubD (MulD (NegD src1) src2) src3));
11659 //   match(Set dst (SubD (NegD (MulD src1 src2)) src3));
11660 
11661 //   format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}
11662 
11663 //   ins_encode %{
11664 //     __ fnmaddd(as_FloatRegister($dst$$reg),
11665 //                as_FloatRegister($src1$$reg),
11666 //                as_FloatRegister($src2$$reg),
11667 //                as_FloatRegister($src3$$reg));
11668 //   %}
11669 
11670 //   ins_pipe(pipe_class_default);
11671 // %}
11672 
11673 // instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
11674 //   match(Set dst (SubF (MulF src1 src2) src3));
11675 
11676 //   format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}
11677 
11678 //   ins_encode %{
11679 //     __ fnmsubs(as_FloatRegister($dst$$reg),
11680 //                as_FloatRegister($src1$$reg),
11681 //                as_FloatRegister($src2$$reg),
11682 //                as_FloatRegister($src3$$reg));
11683 //   %}
11684 
11685 //   ins_pipe(pipe_class_default);
11686 // %}
11687 
11688 // instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
11689 //   match(Set dst (SubD (MulD src1 src2) src3));
11690 
11691 //   format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}
11692 
11693 //   ins_encode %{
11694 //   // n.b. insn name should be fnmsubd
11695 //     __ fnmsub(as_FloatRegister($dst$$reg),
11696 //                as_FloatRegister($src1$$reg),
11697 //                as_FloatRegister($src2$$reg),
11698 //                as_FloatRegister($src3$$reg));
11699 //   %}
11700 
11701 //   ins_pipe(pipe_class_default);
11702 // %}
11703 
11704 
// Single-precision float divide; high cost reflects long FDIV latency.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double-precision float divide; costed higher than the single form.
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11734 
// Single-precision float negate.
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Fixed: the format previously printed "fneg", but the encoding emits
  // fnegs; the sibling negD rule already prints the full mnemonic (fnegd).
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11748 
// Double-precision float negate.
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11762 
// Single-precision float absolute value.
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double-precision float absolute value.
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double-precision square root.
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Single-precision square root.  Matches the widen/sqrt/narrow shape
// (ConvD2F (SqrtD (ConvF2D src))) that the ideal graph uses for a float
// sqrt, and collapses it to one fsqrts.
// NOTE(review): this assumes fsqrts on the float value produces the same
// result as fsqrtd on the widened value narrowed back -- confirm against
// the IEEE-754 correctly-rounded sqrt argument before relying on it.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11814 
11815 // ============================================================================
11816 // Logical Instructions
11817 
11818 // Integer Logical Instructions
11819 
11820 // And Instructions
11821 
11822 
// Int bitwise AND, register-register form.
// NOTE(review): a rFlagsReg cr operand is declared but andw does not set
// flags and there is no effect(KILL cr) -- presumably harmless surplus;
// confirm against the other logical rules before tidying.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
11837 
// Int bitwise AND with a logical-immediate operand (immILog restricts the
// constant to encodable bitmask immediates).
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Fixed: format previously read "andsw" (the flag-setting form) but the
  // encoding emits the plain, non-flag-setting andw.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
11852 
// Or Instructions

// Int bitwise OR, register-register form.
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Int bitwise OR with a logical-immediate operand.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// Int bitwise XOR, register-register form.
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Int bitwise XOR with a logical-immediate operand.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
11916 
11917 // Long Logical Instructions
11918 // TODO
11919 
// Long bitwise AND, register-register form.
// Fixed throughout this group: the format comments read "# int" on long
// operations; changed to "# long" to match the int rules' convention.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long bitwise AND with a logical-immediate operand.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Or Instructions

// Long bitwise OR, register-register form.
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long bitwise OR with a logical-immediate operand.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// Long bitwise XOR, register-register form.
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long bitwise XOR with a logical-immediate operand.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12013 
// Sign-extend int to long: sbfm with immr=0, imms=31 is the sxtw alias.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// Zero-extend int to long: folds ((long)i) & 0xffffffff into a single
// ubfm (uxtw) instead of a sign-extend followed by a mask.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Truncate long to int: movw copies the low 32 bits of src into dst.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Int-to-boolean: dst = (src != 0) ? 1 : 0 via compare-with-zero + cset.
// Clobbers the flags, hence effect(KILL cr).
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// Pointer-to-boolean: dst = (src != NULL) ? 1 : 0, 64-bit compare.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
12088 
// Narrow double to float (this assembler's fcvtd is the d->s convert).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Widen float to double (fcvts is the s->d convert).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Float to int: fcvtzs* converts to signed, rounding toward zero
// (Java truncation semantics).
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Float to long, round toward zero.
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Signed int to float (scvtf, 32-bit source).
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Signed long to float.
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double to int, round toward zero.
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double to long, round toward zero.
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Signed int to double.
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Signed long to double.
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12218 
12219 // stack <-> reg and reg <-> reg shuffles with no conversion
12220 
// Raw-bits move of a float stack slot into a GP register (no conversion).
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Raw-bits move of an int stack slot into an FP register.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Raw-bits move of a double stack slot into a GP register.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Raw-bits move of a long stack slot into an FP register.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Raw-bits store of an FP register to an int stack slot.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Raw-bits store of a GP register to a float stack slot.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
12328 
// Raw-bits store of an FP (double) register to a long stack slot.
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Fixed: format had the operands reversed ("strd $dst, $src"); the
  // encoding stores $src to the $dst stack slot, matching the other
  // Move*_reg_stack rules.
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
12346 
// Raw-bits store of a GP (long) register to a double stack slot.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
12364 
// Register-to-register raw-bits shuffles via fmov (no memory round trip).
// NOTE(review): these use ins_pipe(pipe_class_memory) even though no
// memory is touched -- presumably a conservative pipeline class; confirm
// before changing.

instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_memory);

%}

instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(pipe_class_memory);

%}

instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_memory);

%}

instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(pipe_class_memory);

%}
12436 
12437 // ============================================================================
12438 // clearing of an array
12439 
// Zero an array body.  Pinned to fixed registers (count in r11, base in
// r10) because the shared encoding clobbers both -- hence USE_KILL.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode(aarch64_enc_clear_array_reg_reg(cnt, base));

  ins_pipe(pipe_class_memory);
%}
12452 
12453 // ============================================================================
12454 // Overflow Math Instructions
12455 
12456 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
12457 %{
12458   match(Set cr (OverflowAddI op1 op2));
12459 
12460   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
12461   ins_cost(INSN_COST);
12462   ins_encode %{
12463     __ cmnw($op1$$Register, $op2$$Register);
12464   %}
12465 
12466   ins_pipe(icmp_reg_reg);
12467 %}
12468 
12469 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
12470 %{
12471   match(Set cr (OverflowAddI op1 op2));
12472 
12473   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
12474   ins_cost(INSN_COST);
12475   ins_encode %{
12476     __ cmnw($op1$$Register, $op2$$constant);
12477   %}
12478 
12479   ins_pipe(icmp_reg_imm);
12480 %}
12481 
12482 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
12483 %{
12484   match(Set cr (OverflowAddL op1 op2));
12485 
12486   format %{ "cmn   $op1, $op2\t# overflow check long" %}
12487   ins_cost(INSN_COST);
12488   ins_encode %{
12489     __ cmn($op1$$Register, $op2$$Register);
12490   %}
12491 
12492   ins_pipe(icmp_reg_reg);
12493 %}
12494 
12495 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
12496 %{
12497   match(Set cr (OverflowAddL op1 op2));
12498 
12499   format %{ "cmn   $op1, $op2\t# overflow check long" %}
12500   ins_cost(INSN_COST);
12501   ins_encode %{
12502     __ cmn($op1$$Register, $op2$$constant);
12503   %}
12504 
12505   ins_pipe(icmp_reg_imm);
12506 %}
12507 
12508 instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
12509 %{
12510   match(Set cr (OverflowSubI op1 op2));
12511 
12512   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
12513   ins_cost(INSN_COST);
12514   ins_encode %{
12515     __ cmpw($op1$$Register, $op2$$Register);
12516   %}
12517 
12518   ins_pipe(icmp_reg_reg);
12519 %}
12520 
12521 instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
12522 %{
12523   match(Set cr (OverflowSubI op1 op2));
12524 
12525   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
12526   ins_cost(INSN_COST);
12527   ins_encode %{
12528     __ cmpw($op1$$Register, $op2$$constant);
12529   %}
12530 
12531   ins_pipe(icmp_reg_imm);
12532 %}
12533 
// Long subtract overflow check (reg - reg); V flag signals signed overflow.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
12546 
// Long subtract overflow check (reg - add/sub-encodable immediate).
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
12559 
// Int negate overflow check: negation is matched as (0 - op1), so compare
// zr against op1; V flag is set when op1 == Integer.MIN_VALUE.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
12572 
// Long negate overflow check: matched as (0 - op1); V flag is set when
// op1 == Long.MIN_VALUE.
// NOTE(review): the zero operand is typed immI0 (int 0) while the node is
// OverflowSubL — presumably adlc accepts the int-typed zero constant here;
// confirm against other rules/ports before changing.
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
12585 
// Int multiply overflow check producing a flags result.  The 32x32->64
// product is computed with smull; if its high half is not a pure sign
// extension of the low 32 bits the multiply overflowed (NE after the
// extended subs).  Since consumers test VS/VC (not EQ/NE), the NE result
// is converted into the V flag by materializing 0x80000000 on NE and
// subtracting 1, which overflows exactly when NE was set.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
12606 
// Fused int-multiply-overflow-check + branch.  Avoids the flag-conversion
// tail of overflowMulI_reg by branching directly on the NE/EQ outcome of
// the sign-extension compare, remapping the requested VS/VC condition.
// Only overflow/no_overflow tests are matched (see predicate).
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    // VS (overflow requested) maps to NE, VC maps to EQ.
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
12628 
// Long multiply overflow check producing a flags result.  mul yields the
// low 64 bits and smulh the high 64; the product fits iff the high half
// equals the sign extension of the low half (compared via ASR #31 on the
// low word pattern below).  As in overflowMulI_reg, the NE outcome is then
// converted into the V flag with the 0x80000000 - 1 trick so consumers
// can test VS/VC.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #31\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
12651 
// Fused long-multiply-overflow-check + branch: branches directly on the
// NE/EQ outcome of the high-half comparison, remapping VS->NE and VC->EQ.
// Only overflow/no_overflow tests are matched (see predicate).
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #31\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
12675 
12676 // ============================================================================
12677 // Compare Instructions
12678 
// Signed int compare, register-register.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
12692 
// Signed int compare against constant zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
12706 
// Signed int compare against an immediate encodable in the add/sub
// immediate field (single instruction).
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
12720 
// Signed int compare against an arbitrary immediate; costs double since
// the constant may need a separate materialization (see encoder).
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
12734 
12735 // Unsigned compare Instructions; really, same as signed compare
12736 // except it should only be used to feed an If or a CMovI which takes a
12737 // cmpOpU.
12738 
// Unsigned int compare, register-register.  Emits the same cmpw as the
// signed rule; unsignedness lives in the rFlagsRegU/cmpOpU consumer.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
12752 
// Unsigned int compare against constant zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
12766 
// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
12780 
// Unsigned int compare against an arbitrary immediate (may need the
// constant materialized, hence double cost).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
12794 
// Signed long compare, register-register.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
12808 
// Signed long compare against constant zero.  Uses the same add/sub
// immediate compare encoder as the other CmpL-vs-immediate rules, which
// emits a cmp-with-zero (subs xzr, $op1, #0).
// NOTE(review): the zero operand is typed immI0 (int 0) feeding a CmpL —
// presumably adlc accepts the int-typed zero constant here; confirm
// against other ports before changing.
instruct compL_reg_immI0(rFlagsReg cr, iRegL op1, immI0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  // Fixed: previously printed "tst $op1", but the encoding emits a
  // compare-with-immediate-zero, not a tst (ands).
  format %{ "cmp  $op1, #0" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
12822 
// Signed long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
12836 
// Signed long compare against an arbitrary immediate (may need the
// constant materialized, hence double cost).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
12850 
// Pointer compare, register-register (unsigned flags consumer).
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
12864 
// Compressed-pointer (narrow oop) compare, register-register.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
12878 
// Pointer null test (compare against constant NULL).
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}
12892 
// Narrow-oop null test (compare against compressed NULL).
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
12906 
12907 // FP comparisons
12908 //
12909 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
12910 // using normal cmpOp. See declaration of rFlagsReg for details.
12911 
// Single-precision float compare; sets the normal integer flags (see the
// FP comparisons note above this section).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
12925 
// Single-precision float compare against literal zero (FCMP's immediate
// form only supports comparison with +0.0; the 0.0D argument selects the
// assembler's compare-with-zero overload).
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0D);
  %}

  ins_pipe(pipe_class_compare);
%}
12939 // FROM HERE
12940 
// Double-precision float compare; sets the normal integer flags.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
12954 
// Double-precision float compare against literal zero (FCMP immediate
// form supports only +0.0).
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0D);
  %}

  ins_pipe(pipe_class_compare);
%}
12968 
// Three-way float compare (CmpF3): dst = -1 / 0 / +1, with unordered
// treated as less-than (csinv gives -1 unless EQ; csneg flips -1 to +1
// unless LT).
// Fixed: removed a dead `Label done` that was declared and bound but
// never branched to, and balanced the missing ')' in the format string.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
12996 
// Three-way double compare (CmpD3): dst = -1 / 0 / +1, with unordered
// treated as less-than.
// Fixed: removed a dead `Label done` (declared and bound, never used)
// and balanced the missing ')' in the format string.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
13023 
// Three-way float compare against literal 0.0 (CmpF3 vs zero); unordered
// compares as less-than.
// Fixed: removed a dead `Label done` (declared and bound, never used)
// and balanced the missing ')' in the format string.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0D);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
13050 
// Three-way double compare against literal 0.0 (CmpD3 vs zero); unordered
// compares as less-than.
// Fixed: removed a dead `Label done` (declared and bound, never used)
// and balanced the missing ')' in the format string.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0D);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
13076 
// CmpLTMask: dst = (p < q) ? -1 : 0.  cset produces 0/1, then the
// subtract from zr turns 1 into the all-ones mask.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13097 
// CmpLTMask against zero: an arithmetic shift right by 31 smears the
// sign bit, yielding -1 for negative src and 0 otherwise in one insn.
// (cr is killed only for uniformity with the general rule's flags use.)
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
13113 
13114 // ============================================================================
13115 // Max and Min
13116 
// Signed int minimum: compare then conditionally select src1 when LT.
// Fixed: format string was malformed (missing operand commas, missing
// '#' before the comment, stray trailing tab); the emitted code is
// unchanged.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1, $src2\t# signed int\n\t"
    "cselw $dst, $src1, $src2, lt"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
13141 // FROM HERE
13142 
// Signed int maximum: compare then conditionally select src1 when GT.
// Fixed: format string was malformed (missing operand commas, missing
// '#' before the comment, stray trailing tab); the emitted code is
// unchanged.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1, $src2\t# signed int\n\t"
    "cselw $dst, $src1, $src2, gt"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
13167 
13168 // ============================================================================
13169 // Branch Instructions
13170 
13171 // Direct Branch.
// Unconditional direct branch (Goto).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
13185 
13186 // Conditional Near Branch
// Conditional near branch on signed flags.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
13206 
13207 // Conditional Near Branch Unsigned
// Conditional near branch on unsigned flags.
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
13227 
13228 // Make use of CBZ and CBNZ.  These instructions, as well as being
13229 // shorter than (cmp; branch), have the additional benefit of not
13230 // killing the flags.
13231 
// Fused int compare-against-zero + branch using cbzw/cbnzw; only eq/ne
// tests are matched (see predicate), and the flags are left untouched
// (cr appears as an operand only, with no KILL effect).
instruct cmpI_imm0_branch(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13250 
// Fused long compare-against-zero + branch using cbz/cbnz; eq/ne only,
// flags not clobbered.
instruct cmpL_imm0_branch(cmpOp cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13269 
// Fused pointer null-check + branch using cbz/cbnz; eq/ne only, flags
// not clobbered.
instruct cmpP_imm0_branch(cmpOp cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13288 
13289 // Conditional Far Branch
13290 // Conditional Far Branch Unsigned
13291 // TODO: fixme
13292 
13293 // counted loop end branch near
// Counted-loop back-branch (near), signed condition.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
13309 
13310 // counted loop end branch near Unsigned
// Counted-loop back-branch (near), unsigned condition.
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
13326 
13327 // counted loop end branch far
13328 // counted loop end branch far unsigned
13329 // TODO: fixme
13330 
13331 // ============================================================================
13332 // inlined locking and unlocking
13333 
// Inlined fast-path monitor enter; result is the flags (zero == locked).
// tmp/tmp2 are scratch registers clobbered by the encoder.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
13348 
// Inlined fast-path monitor exit; result is the flags.  tmp/tmp2 are
// scratch registers clobbered by the encoder.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
13361 
13362 
13363 // ============================================================================
13364 // Safepoint Instructions
13365 
13366 // TODO
13367 // provide a near and far version of this code
13368 
// Safepoint poll: a load from the polling page; the VM arms the page to
// fault and trap the thread when a safepoint is requested.
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
13381 
13382 
13383 // ============================================================================
13384 // Procedure Call/Return Instructions
13385 
13386 // Call Java Static Instruction
13387 
// Direct call to a statically-bound Java method.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
13403 
13404 // TO HERE
13405 
13406 // Call Java Dynamic Instruction
// Dynamically-dispatched Java call (through the inline cache).
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
13422 
13423 // Call Runtime Instruction
13424 
// Call from compiled Java code into the VM runtime.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
13439 
13440 // Call Runtime Instruction
13441 
// Leaf runtime call (no safepoint, no Java-state transition); same
// encoder as CallRuntimeDirect.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
13456 
13457 // Call Runtime Instruction
13458 
// Leaf runtime call that does not touch float state; same encoder as
// the other runtime-call rules.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
13473 
13474 // Tail Call; Jump from runtime stub to Java code.
13475 // Also known as an 'interprocedural jump'.
13476 // Target of jump will eventually return to caller.
13477 // TailJump below removes the return address.
// Indirect tail call: jump (not call) from a runtime stub back into
// Java code; the target method oop rides in the inline-cache register.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
13490 
// Indirect tail jump used for exception forwarding; the exception oop
// is pinned to r0.
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
13503 
13504 // Create exception oop: created by stack-crawling runtime code.
13505 // Created exception is now available to this handler, and is setup
13506 // just prior to jumping to this handler. No code emitted.
13507 // TODO check
13508 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Pseudo-instruction: the stack-crawling runtime has already placed the
// exception oop in r0, so this emits no code (size 0).
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
13521 
13522 // Rethrow exception: The exception oop will come in the first
13523 // argument position. Then JUMP (not call) to the rethrow stub code.
// Rethrow: jump (not call) to the rethrow stub; the exception oop is
// already in the first argument register.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
13534 
13535 
13536 // Return Instruction
13537 // epilog node loads ret address into lr as part of frame pop
// Method return; the epilog has already restored lr from the frame.
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
13548 
13549 // Die now.
// Halt: trap deliberately with a breakpoint (brk #999) on a path that
// must never execute.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // TODO
    // implement proper trap call here
    __ brk(999);
  %}

  ins_pipe(pipe_class_default);
%}
13564 
13565 // ============================================================================
13566 // Partial Subtype Check
13567 //
13568 // superklass array for an instance of the superklass.  Set a hidden
13569 // internal cache on a hit (cache is checked with exposed code in
13570 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
13571 // encoding ALSO sets flags.
13572 
// Partial subtype check producing a result register (see the section
// comment above); opcode 0x1 forces the result to zero on a cache hit.
// Registers are pinned (R4/R0/R2/R5) to match the stub's calling
// convention.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
13587 
// Partial subtype check fused with a compare-against-zero: only the
// flags are consumed, so the result register need not be zeroed on a
// hit (opcode 0x0).
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // same heavyweight cost as partialSubtypeCheck above
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
13602 
// String comparison intrinsic; expands to the out-of-line loop in
// MacroAssembler::string_compare.  The fixed registers match that helper's
// calling convention; all string/count inputs are consumed (USE_KILL).
instruct string_compare(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
13617 
// String.indexOf with a runtime pattern length; expands to
// MacroAssembler::string_indexof.  The -1 literal tells the helper that the
// pattern length is not a compile-time constant (contrast with
// string_indexof_con below, which passes the constant).
instruct string_indexof(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
13635 
// String.indexOf specialization for a small constant pattern length
// (immI_le_4): the constant is handed to the helper directly, and zr is
// passed for the pattern-count register since no register holds it.
instruct string_indexof_con(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegI tmp1, iRegI tmp2,
                 iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result" %}

  ins_encode %{
    // Constant pattern length extracted from the immediate operand.
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
13655 
// String equality intrinsic; expands to MacroAssembler::string_equals.
// Fixed registers match the helper; both string pointers and the count
// are consumed.
instruct string_equals(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, iRegP_R10 tmp, rFlagsReg cr)
%{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(KILL tmp, USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result    // KILL $tmp" %}
  ins_encode %{
    __ string_equals($str1$$Register, $str2$$Register,
                      $cnt$$Register, $result$$Register,
                      $tmp$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
13670 
// char[] array equality intrinsic; expands to
// MacroAssembler::char_arrays_equals.  Fixed registers match the helper;
// both array pointers are consumed and tmp is clobbered.
instruct array_equals(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  match(Set dst (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  // Fixed: "ary2" was missing its '$' prefix, so the debug format printed
  // the literal text "ary2" instead of the assigned register.
  format %{ "Array Equals $ary1,$ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ char_arrays_equals($ary1$$Register, $ary2$$Register,
                          $result$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
13684 
// encode char[] to byte[] in ISO_8859_1
// Expands to MacroAssembler::encode_iso_array; clobbers four SIMD temps
// (v0-v3) used for the vectorized narrowing loop inside the helper.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
13703 
13704 // ============================================================================
13705 // This name is KNOWN by the ADLC and cannot be changed.
13706 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
13707 // for this guy.
13708 instruct tlsLoadP(thread_RegP dst)
13709 %{
13710   match(Set dst (ThreadLocal));
13711 
13712   ins_cost(0);
13713 
13714   format %{ " -- \t// $dst=Thread::current(), empty" %}
13715 
13716   size(0);
13717 
13718   ins_encode( /*empty*/ );
13719 
13720   ins_pipe(pipe_class_empty);
13721 %}
13722 
// ====================VECTOR INSTRUCTIONS=====================================

// Vector loads: one rule per access width.  The predicate dispatches on the
// LoadVector node's memory size (4/8/16 bytes); each encoding class emits an
// FP/SIMD ldr of the matching register width (S/D/Q).

// Load vector (32 bits)
instruct loadV4(vecD dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load vector (64 bits)
instruct loadV8(vecD dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Vector (128 bits)
instruct loadV16(vecX dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(pipe_class_memory);
%}
13757 
// Vector stores: mirror of the loads above, dispatching on the StoreVector
// node's memory size and emitting an FP/SIMD str of S/D/Q width.

// Store Vector (32 bits)
instruct storeV4(vecD src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(pipe_class_memory);
%}

// Store Vector (64 bits)
instruct storeV8(vecD src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(pipe_class_memory);
%}

// Store Vector (128 bits)
instruct storeV16(vecX src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(pipe_class_memory);
%}
13790 
// Replicate a byte scalar/immediate into every lane.  The D-register forms
// also cover shorter vectors (length 4) since the upper lanes are ignored.

instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    // Mask to the low byte: only 8 bits of the immediate are replicated.
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(pipe_class_default);
%}

instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    // Mask to the low byte: only 8 bits of the immediate are replicated.
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(pipe_class_default);
%}
13840 
// Replicate a short (16-bit) scalar/immediate into every lane; the D-register
// forms also cover length-2 vectors.

instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    // Mask to the low halfword: only 16 bits of the immediate are replicated.
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(pipe_class_default);
%}

instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    // Mask to the low halfword: only 16 bits of the immediate are replicated.
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(pipe_class_default);
%}
13890 
// Replicate an int (32-bit) scalar/immediate into every lane.

instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
13938 
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  // NOTE(review): this rule is named 2L but matches ReplicateI (with a 4I
  // format tag).  A 128-bit register of zeros has the same bit pattern for
  // either element width, so this is presumably intentional -- confirm
  // against the ideal-graph shape the matcher produces for zero vectors.
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    // eor v,v,v zeroes all 128 bits regardless of prior contents, so no
    // input dependency on dst matters.
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13964 
// Replicate a float/double scalar (already in an FP register) into every
// lane using the element-indexed form of dup.

instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14003 
// ====================REDUCTION ARITHMETIC====================================

// Integer add reduction, 2 lanes: extract both lanes with umov and add them
// into the scalar accumulator src1.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp, iRegI tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $dst, $src1, $tmp\n\t"
            "addw  $dst, $dst, $tmp2\t add reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Integer add reduction, 4 lanes: let the SIMD across-lanes addv sum the
// vector, then move the result out and add the scalar accumulator.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
14042 
// Integer multiply reduction, 2 lanes: extract each lane with umov and fold
// it into the scalar accumulator with mul.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i\n\t"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Integer multiply reduction, 4 lanes: first halve the problem in SIMD --
// move the high 64 bits down (ins) and do a 2S lane-wise multiply -- then
// extract the two partial products and fold them into the accumulator.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i\n\t"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
14086 
14087 instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
14088 %{
14089   match(Set dst (AddReductionVF src1 src2));
14090   ins_cost(INSN_COST);
14091   effect(TEMP tmp, TEMP dst);
14092   format %{ "fadds $dst, $src1, $src2\n\t"
14093             "ins   $tmp, S, $src2, 0, 1\n\t"
14094             "fadds $dst, $dst, $tmp\t add reduction2f"
14095   %}
14096   ins_encode %{
14097     __ fadds(as_FloatRegister($dst$$reg),
14098              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
14099     __ ins(as_FloatRegister($tmp$$reg), __ S,
14100            as_FloatRegister($src2$$reg), 0, 1);
14101     __ fadds(as_FloatRegister($dst$$reg),
14102              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
14103   %}
14104   ins_pipe(pipe_class_default);
14105 %}
14106 
14107 instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
14108 %{
14109   match(Set dst (AddReductionVF src1 src2));
14110   ins_cost(INSN_COST);
14111   effect(TEMP tmp, TEMP dst);
14112   format %{ "fadds $dst, $src1, $src2\n\t"
14113             "ins   $tmp, S, $src2, 0, 1\n\t"
14114             "fadds $dst, $dst, $tmp\n\t"
14115             "ins   $tmp, S, $src2, 0, 2\n\t"
14116             "fadds $dst, $dst, $tmp\n\t"
14117             "ins   $tmp, S, $src2, 0, 3\n\t"
14118             "fadds $dst, $dst, $tmp\t add reduction4f"
14119   %}
14120   ins_encode %{
14121     __ fadds(as_FloatRegister($dst$$reg),
14122              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
14123     __ ins(as_FloatRegister($tmp$$reg), __ S,
14124            as_FloatRegister($src2$$reg), 0, 1);
14125     __ fadds(as_FloatRegister($dst$$reg),
14126              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
14127     __ ins(as_FloatRegister($tmp$$reg), __ S,
14128            as_FloatRegister($src2$$reg), 0, 2);
14129     __ fadds(as_FloatRegister($dst$$reg),
14130              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
14131     __ ins(as_FloatRegister($tmp$$reg), __ S,
14132            as_FloatRegister($src2$$reg), 0, 3);
14133     __ fadds(as_FloatRegister($dst$$reg),
14134              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
14135   %}
14136   ins_pipe(pipe_class_default);
14137 %}
14138 
// Float multiply reduction: strictly ordered scalar fmuls over the lanes,
// mirroring the structure of the float add reductions above.
// Fixed: the trailing tag in both format strings said "add reduction4f"
// (copy-paste from the add rules); corrected to name the actual operation
// and lane count.  Debug-output text only; no code change.
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Four-lane variant of the ordered float multiply reduction above.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14190 
// Double add reduction, 2 lanes: ordered scalar faddd of lane 0 then lane 1
// (lane 1 is moved down into tmp with ins).
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14210 
// Double multiply reduction, 2 lanes: ordered scalar fmuld of lane 0 then
// lane 1 (lane 1 is moved down into tmp with ins).
// Fixed: the format tag said "add reduction2d" (copy-paste from the add
// rule); corrected to "mul reduction2d".  Debug-output text only.
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2d"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14230 
// ====================VECTOR ARITHMETIC=======================================

// --------------------------------- ADD --------------------------------------

// Lane-wise vector add.  Integer forms emit addv, FP forms emit fadd; the
// arrangement (T8B/T4H/T2S/...) encodes lane width, and predicates dispatch
// on the vector length.  D-register forms also serve shorter vectors.

instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// NOTE(review): unlike the other vadd rules this one carries no length
// predicate -- presumably because 2D is the only AddVD shape a 128-bit
// NEON register can hold; confirm against matcher vector-length handling.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14375 
14376 // --------------------------------- SUB --------------------------------------
14377 
14378 instruct vsub8B(vecD dst, vecD src1, vecD src2)
14379 %{
14380   predicate(n->as_Vector()->length() == 4 ||
14381             n->as_Vector()->length() == 8);
14382   match(Set dst (SubVB src1 src2));
14383   ins_cost(INSN_COST);
14384   format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
14385   ins_encode %{
14386     __ subv(as_FloatRegister($dst$$reg), __ T8B,
14387             as_FloatRegister($src1$$reg),
14388             as_FloatRegister($src2$$reg));
14389   %}
14390   ins_pipe(pipe_class_default);
14391 %}
14392 
14393 instruct vsub16B(vecX dst, vecX src1, vecX src2)
14394 %{
14395   predicate(n->as_Vector()->length() == 16);
14396   match(Set dst (SubVB src1 src2));
14397   ins_cost(INSN_COST);
14398   format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
14399   ins_encode %{
14400     __ subv(as_FloatRegister($dst$$reg), __ T16B,
14401             as_FloatRegister($src1$$reg),
14402             as_FloatRegister($src2$$reg));
14403   %}
14404   ins_pipe(pipe_class_default);
14405 %}
14406 
14407 instruct vsub4S(vecD dst, vecD src1, vecD src2)
14408 %{
14409   predicate(n->as_Vector()->length() == 2 ||
14410             n->as_Vector()->length() == 4);
14411   match(Set dst (SubVS src1 src2));
14412   ins_cost(INSN_COST);
14413   format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
14414   ins_encode %{
14415     __ subv(as_FloatRegister($dst$$reg), __ T4H,
14416             as_FloatRegister($src1$$reg),
14417             as_FloatRegister($src2$$reg));
14418   %}
14419   ins_pipe(pipe_class_default);
14420 %}
14421 
14422 instruct vsub8S(vecX dst, vecX src1, vecX src2)
14423 %{
14424   predicate(n->as_Vector()->length() == 8);
14425   match(Set dst (SubVS src1 src2));
14426   ins_cost(INSN_COST);
14427   format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
14428   ins_encode %{
14429     __ subv(as_FloatRegister($dst$$reg), __ T8H,
14430             as_FloatRegister($src1$$reg),
14431             as_FloatRegister($src2$$reg));
14432   %}
14433   ins_pipe(pipe_class_default);
14434 %}
14435 
14436 instruct vsub2I(vecD dst, vecD src1, vecD src2)
14437 %{
14438   predicate(n->as_Vector()->length() == 2);
14439   match(Set dst (SubVI src1 src2));
14440   ins_cost(INSN_COST);
14441   format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
14442   ins_encode %{
14443     __ subv(as_FloatRegister($dst$$reg), __ T2S,
14444             as_FloatRegister($src1$$reg),
14445             as_FloatRegister($src2$$reg));
14446   %}
14447   ins_pipe(pipe_class_default);
14448 %}
14449 
14450 instruct vsub4I(vecX dst, vecX src1, vecX src2)
14451 %{
14452   predicate(n->as_Vector()->length() == 4);
14453   match(Set dst (SubVI src1 src2));
14454   ins_cost(INSN_COST);
14455   format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
14456   ins_encode %{
14457     __ subv(as_FloatRegister($dst$$reg), __ T4S,
14458             as_FloatRegister($src1$$reg),
14459             as_FloatRegister($src2$$reg));
14460   %}
14461   ins_pipe(pipe_class_default);
14462 %}
14463 
14464 instruct vsub2L(vecX dst, vecX src1, vecX src2)
14465 %{
14466   predicate(n->as_Vector()->length() == 2);
14467   match(Set dst (SubVL src1 src2));
14468   ins_cost(INSN_COST);
14469   format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
14470   ins_encode %{
14471     __ subv(as_FloatRegister($dst$$reg), __ T2D,
14472             as_FloatRegister($src1$$reg),
14473             as_FloatRegister($src2$$reg));
14474   %}
14475   ins_pipe(pipe_class_default);
14476 %}
14477 
14478 instruct vsub2F(vecD dst, vecD src1, vecD src2)
14479 %{
14480   predicate(n->as_Vector()->length() == 2);
14481   match(Set dst (SubVF src1 src2));
14482   ins_cost(INSN_COST);
14483   format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
14484   ins_encode %{
14485     __ fsub(as_FloatRegister($dst$$reg), __ T2S,
14486             as_FloatRegister($src1$$reg),
14487             as_FloatRegister($src2$$reg));
14488   %}
14489   ins_pipe(pipe_class_default);
14490 %}
14491 
14492 instruct vsub4F(vecX dst, vecX src1, vecX src2)
14493 %{
14494   predicate(n->as_Vector()->length() == 4);
14495   match(Set dst (SubVF src1 src2));
14496   ins_cost(INSN_COST);
14497   format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
14498   ins_encode %{
14499     __ fsub(as_FloatRegister($dst$$reg), __ T4S,
14500             as_FloatRegister($src1$$reg),
14501             as_FloatRegister($src2$$reg));
14502   %}
14503   ins_pipe(pipe_class_default);
14504 %}
14505 
14506 instruct vsub2D(vecX dst, vecX src1, vecX src2)
14507 %{
14508   predicate(n->as_Vector()->length() == 2);
14509   match(Set dst (SubVD src1 src2));
14510   ins_cost(INSN_COST);
14511   format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
14512   ins_encode %{
14513     __ fsub(as_FloatRegister($dst$$reg), __ T2D,
14514             as_FloatRegister($src1$$reg),
14515             as_FloatRegister($src2$$reg));
14516   %}
14517   ins_pipe(pipe_class_default);
14518 %}
14519 
14520 // --------------------------------- MUL --------------------------------------
14521 
// Vector multiply, short lanes in a 64-bit register (MulVS): NEON mul v.4h.
// Predicate accepts length 2 or 4, so 2-element short vectors also use this rule.
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector multiply, 8 short lanes (128-bit MulVS): NEON mul v.8h.
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector multiply, 2 int lanes (64-bit MulVI): NEON mul v.2s.
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector multiply, 4 int lanes (128-bit MulVI): NEON mul v.4s.
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector float multiply, 2 float lanes (64-bit MulVF): NEON fmul v.2s.
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector float multiply, 4 float lanes (128-bit MulVF): NEON fmul v.4s.
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector double multiply, 2 double lanes (128-bit MulVD): NEON fmul v.2d.
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14620 
14621 // --------------------------------- DIV --------------------------------------
14622 
// Vector float divide, 2 float lanes (64-bit DivVF): NEON fdiv v.2s.
// Only FP divides are defined here; integer vector divide has no NEON instruction.
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector float divide, 4 float lanes (128-bit DivVF): NEON fdiv v.4s.
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector double divide, 2 double lanes (128-bit DivVD): NEON fdiv v.2d.
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14664 
14665 // --------------------------------- AND --------------------------------------
14666 
// Vector bitwise AND on 64-bit (or smaller) vectors: AndV -> NEON and v.8b.
// Bitwise ops key on length_in_bytes, not lane count, since they are lane-agnostic;
// one rule serves both 4-byte and 8-byte vectors.
instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    // andr is this assembler's name for the AND instruction ("and" is a C++ keyword).
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector bitwise AND on 128-bit vectors: AndV -> NEON and v.16b.
instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14695 
14696 // --------------------------------- OR ---------------------------------------
14697 
// Vector bitwise OR on 64-bit (or smaller) vectors: OrV -> NEON orr v.8b.
// Keys on length_in_bytes so one rule serves both 4-byte and 8-byte vectors.
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  // Fixed: format previously said "and" although this rule emits orr;
  // the disassembly comment now matches the encoded instruction (cf. vor16B).
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14712 
// Vector bitwise OR on 128-bit vectors: OrV -> NEON orr v.16b.
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14726 
14727 // --------------------------------- XOR --------------------------------------
14728 
// Vector bitwise XOR on 64-bit (or smaller) vectors: XorV -> NEON eor v.8b.
// Keys on length_in_bytes so one rule serves both 4-byte and 8-byte vectors.
instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Vector bitwise XOR on 128-bit vectors: XorV -> NEON eor v.16b.
instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14757 
14758 // ------------------------------ Shift ---------------------------------------
14759 
// Materialize a variable left-shift count: broadcast the GP register count
// into every byte lane of a vector register (dup v.16b, w-reg).
instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (LShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
// Materialize a variable right-shift count: broadcast, then negate each lane
// so the sshl/ushl rules below perform a right shift.
instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
    // negr: per-lane integer negate (NEON neg); flips the count's sign.
    __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14779 
// Variable shift of byte lanes, 64-bit vector: sshl shifts left by positive
// counts and right by negative counts, so one rule covers both LShiftVB and
// (signed) RShiftVB -- the count was negated by vshiftcntR for right shifts.
instruct vsll8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable shift of byte lanes, 128-bit vector (see vsll8B for the
// positive/negative count convention).
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable unsigned (logical) right shift of byte lanes, 64-bit vector:
// ushl with the negated count produced by vshiftcntR.
instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable unsigned (logical) right shift of byte lanes, 128-bit vector.
instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14835 
// Immediate left shift of byte lanes, 64-bit vector. Java masks int shift
// counts to 0..31; a count >= 8 (the byte lane width) must yield zero, which
// is synthesized with eor src,src since shl cannot encode such a shift.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      // Shift >= lane width: result is all zeroes (x ^ x == 0).
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate left shift of byte lanes, 128-bit vector (same zeroing trick as
// vsll8B_imm for counts >= 8).
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate arithmetic right shift of byte lanes, 64-bit vector. Counts >= 8
// are clamped to 7 (arithmetic shift saturates at lane_width-1, preserving the
// sign). NOTE(review): the final "-sh & 7" transform presumably matches this
// assembler's sshr immediate encoding convention -- verify against the sshr
// definition in the companion assembler before changing.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate arithmetic right shift of byte lanes, 128-bit vector (same count
// clamping and encoding transform as vsra8B_imm).
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate logical right shift of byte lanes, 64-bit vector. Counts >= 8
// zero the result via eor; otherwise ushr with the "-sh & 7" encoding
// transform (see NOTE on vsra8B_imm).
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate logical right shift of byte lanes, 128-bit vector.
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(pipe_class_default);
%}
14944 
// Variable shift of short lanes, 64-bit vector: sshl handles both left
// (positive count) and signed right (negative count, via vshiftcntR) shifts.
// Predicate accepts length 2 or 4.
instruct vsll4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable shift of 8 short lanes, 128-bit vector.
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable logical right shift of short lanes, 64-bit vector (ushl with
// negated count).
instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable logical right shift of 8 short lanes, 128-bit vector.
instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15000 
// Immediate left shift of short lanes, 64-bit vector. Counts >= 16 (the
// halfword lane width) zero the result via eor; shl handles 0..15 directly.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate left shift of 8 short lanes, 128-bit vector.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate arithmetic right shift of short lanes, 64-bit vector. Counts
// >= 16 clamp to 15 (sign-preserving); "-sh & 15" matches this assembler's
// sshr immediate encoding convention (see NOTE on vsra8B_imm).
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) sh = 15;
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate arithmetic right shift of 8 short lanes, 128-bit vector.
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) sh = 15;
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate logical right shift of short lanes, 64-bit vector. Counts >= 16
// zero the result via eor.
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate logical right shift of 8 short lanes, 128-bit vector.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(pipe_class_default);
%}
15109 
// Variable shift of 2 int lanes, 64-bit vector: sshl handles both left
// (positive count) and signed right (negative count, via vshiftcntR) shifts.
instruct vsll2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable shift of 4 int lanes, 128-bit vector.
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable logical right shift of 2 int lanes (ushl with negated count).
instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable logical right shift of 4 int lanes, 128-bit vector.
instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15163 
// Immediate left shift of 2 int lanes. The "& 31" mask matches Java's scalar
// int shift-count semantics; no zeroing branch is needed since 0..31 never
// reaches the 32-bit lane width.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate left shift of 4 int lanes, 128-bit vector.
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate arithmetic right shift of 2 int lanes. The negate-and-mask of the
// count matches this assembler's sshr immediate encoding convention (see NOTE
// on vsra8B_imm).
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate arithmetic right shift of 4 int lanes, 128-bit vector.
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate logical right shift of 2 int lanes.
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate logical right shift of 4 int lanes, 128-bit vector.
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}
15241 
// Variable shift of 2 long lanes: sshl handles both left (positive count)
// and signed right (negative count, via vshiftcntR) shifts.
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable logical right shift of 2 long lanes (ushl with negated count).
instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate left shift of 2 long lanes. The "& 63" mask matches Java's
// scalar long shift-count semantics; 0..63 never reaches the 64-bit lane
// width, so no zeroing branch is needed.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 63);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate arithmetic right shift of 2 long lanes (negate-and-mask encoding
// convention; see NOTE on vsra8B_imm).
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate logical right shift of 2 long lanes.
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(pipe_class_default);
%}
15307 
15308 //----------PEEPHOLE RULES-----------------------------------------------------
15309 // These must follow all instruction definitions as they use the names
15310 // defined in the instructions definitions.
15311 //
15312 // peepmatch ( root_instr_name [preceding_instruction]* );
15313 //
15314 // peepconstraint %{
15315 // (instruction_number.operand_name relational_op instruction_number.operand_name
15316 //  [, ...] );
15317 // // instruction numbers are zero-based using left to right order in peepmatch
15318 //
15319 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
15320 // // provide an instruction_number.operand_name for each operand that appears
15321 // // in the replacement instruction's match rule
15322 //
15323 // ---------VM FLAGS---------------------------------------------------------
15324 //
15325 // All peephole optimizations can be turned off using -XX:-OptoPeephole
15326 //
15327 // Each peephole rule is given an identifying number starting with zero and
15328 // increasing by one in the order seen by the parser.  An individual peephole
15329 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
15330 // on the command-line.
15331 //
15332 // ---------CURRENT LIMITATIONS----------------------------------------------
15333 //
15334 // Only match adjacent instructions in same basic block
15335 // Only equality constraints
15336 // Only constraints between operands, not (0.dest_reg == RAX_enc)
15337 // Only one replacement instruction
15338 //
15339 // ---------EXAMPLE----------------------------------------------------------
15340 //
15341 // // pertinent parts of existing instructions in architecture description
15342 // instruct movI(iRegINoSp dst, iRegI src)
15343 // %{
15344 //   match(Set dst (CopyI src));
15345 // %}
15346 //
15347 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
15348 // %{
15349 //   match(Set dst (AddI dst src));
15350 //   effect(KILL cr);
15351 // %}
15352 //
15353 // // Change (inc mov) to lea
15354 // peephole %{
//   // increment preceded by register-register move
15356 //   peepmatch ( incI_iReg movI );
15357 //   // require that the destination register of the increment
15358 //   // match the destination register of the move
15359 //   peepconstraint ( 0.dst == 1.dst );
15360 //   // construct a replacement instruction that sets
15361 //   // the destination to ( move's source register + one )
15362 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
15363 // %}
15364 //
15365 
15366 // Implementation no longer uses movX instructions since
15367 // machine-independent system no longer uses CopyX nodes.
15368 //
15369 // peephole
15370 // %{
15371 //   peepmatch (incI_iReg movI);
15372 //   peepconstraint (0.dst == 1.dst);
15373 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
15374 // %}
15375 
15376 // peephole
15377 // %{
15378 //   peepmatch (decI_iReg movI);
15379 //   peepconstraint (0.dst == 1.dst);
15380 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
15381 // %}
15382 
15383 // peephole
15384 // %{
15385 //   peepmatch (addI_iReg_imm movI);
15386 //   peepconstraint (0.dst == 1.dst);
15387 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
15388 // %}
15389 
15390 // peephole
15391 // %{
15392 //   peepmatch (incL_iReg movL);
15393 //   peepconstraint (0.dst == 1.dst);
15394 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
15395 // %}
15396 
15397 // peephole
15398 // %{
15399 //   peepmatch (decL_iReg movL);
15400 //   peepconstraint (0.dst == 1.dst);
15401 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
15402 // %}
15403 
15404 // peephole
15405 // %{
15406 //   peepmatch (addL_iReg_imm movL);
15407 //   peepconstraint (0.dst == 1.dst);
15408 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
15409 // %}
15410 
15411 // peephole
15412 // %{
15413 //   peepmatch (addP_iReg_imm movP);
15414 //   peepconstraint (0.dst == 1.dst);
15415 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
15416 // %}
15417 
15418 // // Change load of spilled value to only a spill
15419 // instruct storeI(memory mem, iRegI src)
15420 // %{
15421 //   match(Set mem (StoreI mem src));
15422 // %}
15423 //
15424 // instruct loadI(iRegINoSp dst, memory mem)
15425 // %{
15426 //   match(Set dst (LoadI mem));
15427 // %}
15428 //
15429 
15430 //----------SMARTSPILL RULES---------------------------------------------------
15431 // These must follow all instruction definitions as they use the names
15432 // defined in the instructions definitions.
15433 
15434 // Local Variables:
15435 // mode: c++
15436 // End: