1 //
   2 // Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
  71 //   r27-r32 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
  74 // as regards Java usage. we don't use any callee save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
// 64 bit general purpose registers, declared as 32 bit halves: RN is
// the real low word, RN_H is the virtual high word seen only by the
// register allocator (see note above). Save types are
// (register-save, C-convention-save); r8/r9 are deliberately absent
// (reserved as scratch -- see note above).
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
// NOTE(review): r18 is the platform register on some ABIs (e.g.
// Windows, Darwin) -- confirm it is safe to allocate on this target
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
 155 // AArch64 has 32 floating-point registers. Each can store a vector of
 156 // single or double precision floating-point values up to 8 * 32
 157 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 158 // use the first float or double element of the vector.
 159 
// for Java use float registers v0-v15 are always save on call (whereas
// the platform ABI treats v8-v15 as callee save). float registers
// v16-v31 are SOC as per the platform spec
 163 
  // Floating point / SIMD registers, declared as 32 bit halves like
  // the integer registers above. Save types per the defs below:
  // v0-v7 are (SOC, SOC), v8-v15 are (SOC, SOE) to honour the C ABI,
  // and v16-v31 are (SOC, SOC).
  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()         );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next() );
  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()         );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next() );
  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()         );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next() );
  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()         );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next() );
  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()         );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next() );
  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()         );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next() );
  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()         );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next() );
  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()         );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next() );
  reg_def V8   ( SOC, SOE, Op_RegF,  8, v8->as_VMReg()         );
  reg_def V8_H ( SOC, SOE, Op_RegF,  8, v8->as_VMReg()->next() );
  reg_def V9   ( SOC, SOE, Op_RegF,  9, v9->as_VMReg()         );
  reg_def V9_H ( SOC, SOE, Op_RegF,  9, v9->as_VMReg()->next() );
  reg_def V10  ( SOC, SOE, Op_RegF, 10, v10->as_VMReg()        );
  reg_def V10_H( SOC, SOE, Op_RegF, 10, v10->as_VMReg()->next());
  reg_def V11  ( SOC, SOE, Op_RegF, 11, v11->as_VMReg()        );
  reg_def V11_H( SOC, SOE, Op_RegF, 11, v11->as_VMReg()->next());
  reg_def V12  ( SOC, SOE, Op_RegF, 12, v12->as_VMReg()        );
  reg_def V12_H( SOC, SOE, Op_RegF, 12, v12->as_VMReg()->next());
  reg_def V13  ( SOC, SOE, Op_RegF, 13, v13->as_VMReg()        );
  reg_def V13_H( SOC, SOE, Op_RegF, 13, v13->as_VMReg()->next());
  reg_def V14  ( SOC, SOE, Op_RegF, 14, v14->as_VMReg()        );
  reg_def V14_H( SOC, SOE, Op_RegF, 14, v14->as_VMReg()->next());
  reg_def V15  ( SOC, SOE, Op_RegF, 15, v15->as_VMReg()        );
  reg_def V15_H( SOC, SOE, Op_RegF, 15, v15->as_VMReg()->next());
  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()        );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next());
  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()        );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next());
  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()        );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next());
  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()        );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next());
  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()        );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next());
  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()        );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next());
  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()        );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next());
  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()        );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next());
  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()        );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next());
  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()        );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next());
  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()        );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next());
  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()        );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next());
  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()        );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next());
  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()        );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next());
  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()        );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next());
  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()        );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next());
 228 
 229 // ----------------------------
 230 // Special Registers
 231 // ----------------------------
 232 
// the AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. the FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
 238 
 239 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 240 
 241 
 242 // Specify priority of register selection within phases of register
 243 // allocation.  Highest priority is first.  A useful heuristic is to
 244 // give registers a low priority when they are required by machine
 245 // instructions, like EAX and EDX on I486, and choose no-save registers
 246 // before save-on-call, & save-on-call before save-on-entry.  Registers
 247 // which participate in fixed calling sequences should come last.
 248 // Registers which are used as pairs must fall on an even boundary.
 249 
// Integer register allocation order: volatile scratch registers
// first, then the Java argument registers, then the callee-saved set,
// and finally the fixed-role registers so they are chosen last.
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);
 290 
// Float register allocation order: the no-save set (v16-v31) first,
// then the FP argument registers (v0-v7), then the C-ABI callee-saved
// set (v8-v15) last.
alloc_class chunk1(

    // no save
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H,

    // arg registers
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,

    // non-volatiles
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
);
 331 
 332 alloc_class chunk2(RFLAGS);
 333 
 334 //----------Architecture Description Register Classes--------------------------
 335 // Several register classes are automatically defined based upon information in
 336 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 341 //
 342 
// Class for all 32 bit integer registers -- excludes SP which will
// never be used as an integer register
// (R29/fp and R30/lr are included; only R31/sp is excluded)
reg_class any_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30
);
 376 
// Singleton classes pin an operand to one specific int register.

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);
 388 
// Class for all long integer registers (including SP -- R31/R31_H)
reg_class any_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 422 
 423 // Class for all non-special integer registers
 424 reg_class no_special_reg32(
 425     R0,
 426     R1,
 427     R2,
 428     R3,
 429     R4,
 430     R5,
 431     R6,
 432     R7,
 433     R10,
 434     R11,
 435     R12,                        // rmethod
 436     R13,
 437     R14,
 438     R15,
 439     R16,
 440     R17,
 441     R18,
 442     R19,
 443     R20,
 444     R21,
 445     R22,
 446     R23,
 447     R24,
 448     R25,
 449     R26
 450  /* R27, */                     // heapbase
 451  /* R28, */                     // thread
 452     R29,                        // fp
 453  /* R30, */                     // lr
 454  /* R31 */                      // sp
 455 );
 456 
// Class for all non-special long integer registers. Heapbase (R27),
// thread (R28), lr (R30) and sp (R31) are excluded; fp (R29) is
// allocatable, matching no_special_reg32 above.
reg_class no_special_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
    R29, R29_H,                 // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 490 
// Singleton 64 bit (low/high half) register classes, used by rules
// that pin a value to one specific general purpose register.

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register (r12)
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register (r27)
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register (r28)
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register (r29)
reg_class fp_reg(
    R29, R29_H
);

// Class for link register (r30)
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register (r31)
reg_class sp_reg(
  R31, R31_H
);
 560 
// Class for all pointer registers -- every register R0-R31, including
// the special ones (heapbase, thread, fp, lr, sp)
reg_class ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 594 
// Class for all non_special pointer registers
// n.b. unlike no_special_reg above, fp (R29) is excluded here as well
reg_class no_special_ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 628 
// Class for all float registers -- all 32 v registers are available
// for float allocation (see the SOC/SOE note above the reg_defs)
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
 664 
// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers (all 32 v registers with halves)
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 702 
// Singleton classes pinning an operand to a specific v register.
// NOTE(review): the comments say "128 bit" but each class names only
// the two 32-bit halves V<n>, V<n>_H -- confirm the intended width.

// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Singleton class for condition codes (the flags pseudo-register)
reg_class int_flags(RFLAGS);
 725 
 726 %}
 727 
 728 //----------DEFINITION BLOCK---------------------------------------------------
 729 // Define name --> value mappings to inform the ADLC of an integer valued name
 730 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 731 // Format:
 732 //        int_def  <name>         ( <int_value>, <expression>);
 733 // Generated Code in ad_<arch>.hpp
 734 //        #define  <name>   (<expression>)
 735 //        // value == <int_value>
 736 // Generated code in ad_<arch>.cpp adlc_verification()
 737 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 738 //
 739 
 740 // we follow the ppc-aix port in using a simple cost model which ranks
 741 // register operations as cheap, memory ops as more expensive and
 742 // branches as most expensive. the first two have a low as well as a
 743 // normal cost. huge cost appears to be a way of saying don't do
 744 // something
 745 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls cost twice a register op.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile references are costed heavily (10 register ops).
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 753 
 754 
 755 //----------SOURCE BLOCK-------------------------------------------------------
 756 // This is a block of C++ code which provides values, functions, and
 757 // definitions necessary in the rest of the architecture description
 758 
 759 source_hpp %{
 760 
 761 #include "memory/cardTableModRefBS.hpp"
 762 
// Sizing hooks queried by Compile::shorten_branches; this port emits
// no call trampoline stubs, so both queries return zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
 780 
// Emission and sizing hooks for the exception and deopt handler
// stubs (the emit_* bodies are defined elsewhere in this file).
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // worst-case handler size: a single far branch
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // NOTE(review): 4 words presumably covers the adr (1 insn) plus a
    // far branch of up to 3 insns -- confirm against
    // MacroAssembler::far_branch_size()
    return 4 * NativeInstruction::instruction_size;
  }
};
 797 
  // graph traversal helpers: locate the membar feeding / fed by node
  // n, returning its control and memory projections through ctl/mem
  MemBarNode *has_parent_membar(const Node *n,
                                ProjNode *&ctl, ProjNode *&mem);
  MemBarNode *has_child_membar(const MemBarNode *n,
                               ProjNode *&ctl, ProjNode *&mem);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs
  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // Use barrier instructions (dmb + plain ldr/str) rather than load
  // acquire / store release (ldar/stlr).
  const bool UseBarriersForVolatile = false;
  // Use barrier instructions for unsafe volatile gets rather than
  // trying to identify an exact signature for them
  const bool UseBarriersForUnsafeVolatileGet = false;
 819 %}
 820 
 821 source %{
 822 
 823   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
 824   // use to implement volatile reads and writes. For a volatile read
 825   // we simply need
 826   //
 827   //   ldar<x>
 828   //
 829   // and for a volatile write we need
 830   //
 831   //   stlr<x>
 832   // 
 833   // Alternatively, we can implement them by pairing a normal
 834   // load/store with a memory barrier. For a volatile read we need
 835   // 
 836   //   ldr<x>
 837   //   dmb ishld
 838   //
 839   // for a volatile write
 840   //
 841   //   dmb ish
 842   //   str<x>
 843   //   dmb ish
 844   //
 845   // In order to generate the desired instruction sequence we need to
 846   // be able to identify specific 'signature' ideal graph node
 847   // sequences which i) occur as a translation of a volatile reads or
 848   // writes and ii) do not occur through any other translation or
  // graph transformation. We can then provide alternative adlc
 850   // matching rules which translate these node sequences to the
 851   // desired machine code sequences. Selection of the alternative
 852   // rules can be implemented by predicates which identify the
 853   // relevant node sequences.
 854   //
 855   // The ideal graph generator translates a volatile read to the node
 856   // sequence
 857   //
 858   //   LoadX[mo_acquire]
 859   //   MemBarAcquire
 860   //
 861   // As a special case when using the compressed oops optimization we
 862   // may also see this variant
 863   //
 864   //   LoadN[mo_acquire]
 865   //   DecodeN
 866   //   MemBarAcquire
 867   //
 868   // A volatile write is translated to the node sequence
 869   //
 870   //   MemBarRelease
 871   //   StoreX[mo_release]
 872   //   MemBarVolatile
 873   //
 874   // n.b. the above node patterns are generated with a strict
 875   // 'signature' configuration of input and output dependencies (see
 876   // the predicates below for exact details). The two signatures are
 877   // unique to translated volatile reads/stores -- they will not
 878   // appear as a result of any other bytecode translation or inlining
 879   // nor as a consequence of optimizing transforms.
 880   //
 881   // We also want to catch inlined unsafe volatile gets and puts and
 882   // be able to implement them using either ldar<x>/stlr<x> or some
 883   // combination of ldr<x>/stlr<x> and dmb instructions.
 884   //
 885   // Inlined unsafe volatiles puts manifest as a minor variant of the
 886   // normal volatile put node sequence containing an extra cpuorder
 887   // membar
 888   //
 889   //   MemBarRelease
 890   //   MemBarCPUOrder
 891   //   StoreX[mo_release]
 892   //   MemBarVolatile
 893   //
 894   // n.b. as an aside, the cpuorder membar is not itself subject to
 895   // matching and translation by adlc rules.  However, the rule
 896   // predicates need to detect its presence in order to correctly
 897   // select the desired adlc rules.
 898   //
 899   // Inlined unsafe volatiles gets manifest as a somewhat different
 900   // node sequence to a normal volatile get
 901   //
 902   //   MemBarCPUOrder
 903   //        ||       \\
 904   //   MemBarAcquire LoadX[mo_acquire]
 905   //        ||
 906   //   MemBarCPUOrder
 907   //
 908   // In this case the acquire membar does not directly depend on the
 909   // load. However, we can be sure that the load is generated from an
 910   // inlined unsafe volatile get if we see it dependent on this unique
 911   // sequence of membar nodes. Similarly, given an acquire membar we
 912   // can know that it was added because of an inlined unsafe volatile
 913   // get if it is fed and feeds a cpuorder membar and if its feed
 914   // membar also feeds an acquiring load.
 915   //
 916   // So, where we can identify these volatile read and write
 917   // signatures we can choose to plant either of the above two code
 918   // sequences. For a volatile read we can simply plant a normal
 919   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
 920   // also choose to inhibit translation of the MemBarAcquire and
 921   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
 922   //
 923   // When we recognise a volatile store signature we can choose to
  // plant a dmb ish as a translation for the MemBarRelease, a
 925   // normal str<x> and then a dmb ish for the MemBarVolatile.
 926   // Alternatively, we can inhibit translation of the MemBarRelease
 927   // and MemBarVolatile and instead plant a simple stlr<x>
 928   // instruction.
 929   //
 930   // Of course, the above only applies when we see these signature
 931   // configurations. We still want to plant dmb instructions in any
 932   // other cases where we may see a MemBarAcquire, MemBarRelease or
 933   // MemBarVolatile. For example, at the end of a constructor which
 934   // writes final/volatile fields we will see a MemBarRelease
 935   // instruction and this needs a 'dmb ish' lest we risk the
 936   // constructed object being visible without making the
 937   // final/volatile field writes visible.
 938   //
 939   // n.b. the translation rules below which rely on detection of the
 940   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
 941   // If we see anything other than the signature configurations we
  // always just translate the loads and stores to ldr<x> and str<x>
 943   // and translate acquire, release and volatile membars to the
 944   // relevant dmb instructions.
 945   //
 946   // n.b.b as a case in point for the above comment, the current
 947   // predicates don't detect the precise signature for certain types
 948   // of volatile object stores (where the heap_base input type is not
 949   // known at compile-time to be non-NULL). In those cases the
 950   // MemBarRelease and MemBarVolatile bracket an if-then-else sequence
 951   // with a store in each branch (we need a different store depending
 952   // on whether heap_base is actually NULL). In such a case we will
 953   // just plant a dmb both before and after the branch/merge. The
 954   // predicate could (and probably should) be fixed later to also
 955   // detect this case.
 956 
 957   // graph traversal helpers
 958 
 959   // if node n is linked to a parent MemBarNode by an intervening
 960   // Control or Memory ProjNode return the MemBarNode otherwise return
 961   // NULL.
 962   //
 963   // n may only be a Load or a MemBar.
 964   //
 965   // The ProjNode* references c and m are used to return the relevant
 966   // nodes.
 967 
 968   MemBarNode *has_parent_membar(const Node *n, ProjNode *&c, ProjNode *&m)
 969   {
 970     Node *ctl = NULL;
 971     Node *mem = NULL;
 972     Node *membar = NULL;
 973 
 974     if (n->is_Load()) {
 975       ctl = n->lookup(LoadNode::Control);
 976       mem = n->lookup(LoadNode::Memory);
 977     } else if (n->is_MemBar()) {
 978       ctl = n->lookup(TypeFunc::Control);
 979       mem = n->lookup(TypeFunc::Memory);
 980     } else {
 981         return NULL;
 982     }
 983 
 984     if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj())
 985       return NULL;
 986 
 987     c = ctl->as_Proj();
 988 
 989     membar = ctl->lookup(0);
 990 
 991     if (!membar || !membar->is_MemBar())
 992       return NULL;
 993 
 994     m = mem->as_Proj();
 995 
 996     if (mem->lookup(0) != membar)
 997       return NULL;
 998 
 999     return membar->as_MemBar();
1000   }
1001 
1002   // if n is linked to a child MemBarNode by intervening Control and
1003   // Memory ProjNodes return the MemBarNode otherwise return NULL.
1004   //
  // The ProjNode*& references c and m are used to return the relevant
  // nodes.
1008 
1009   MemBarNode *has_child_membar(const MemBarNode *n, ProjNode *&c, ProjNode *&m)
1010   {
1011     ProjNode *ctl = n->proj_out(TypeFunc::Control);
1012     ProjNode *mem = n->proj_out(TypeFunc::Memory);
1013 
1014     // MemBar needs to have both a Ctl and Mem projection
1015     if (! ctl || ! mem)
1016       return NULL;
1017 
1018     c = ctl;
1019     m = mem;
1020 
1021     MemBarNode *child = NULL;
1022     Node *x;
1023 
1024     for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1025       x = ctl->fast_out(i);
1026       // if we see a membar we keep hold of it. we may also see a new
1027       // arena copy of the original but it will appear later
1028       if (x->is_MemBar()) {
1029           child = x->as_MemBar();
1030           break;
1031       }
1032     }
1033 
1034     if (child == NULL)
1035       return NULL;
1036 
1037     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1038       x = mem->fast_out(i);
1039       // if we see a membar we keep hold of it. we may also see a new
1040       // arena copy of the original but it will appear later
1041       if (x == child) {
1042         return child;
1043       }
1044     }
1045     return NULL;
1046   }
1047 
1048   // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
1049 
// predicate controlling elision of a MemBarAcquire: returns true iff
// the barrier belongs to one of the volatile-get signatures described
// above, in which case it can be dropped because the associated load
// will be planted as an ldar<x>
bool unnecessary_acquire(const Node *barrier) {
  // assert barrier->is_MemBar();
  if (UseBarriersForVolatile)
    // we need to plant a dmb
    return false;

  // a volatile read derived from bytecode (or also from an inlined
  // SHA field read via LibraryCallKit::load_field_from_object)
  // manifests as a LoadX[mo_acquire] followed by an acquire membar
  // with a bogus read dependency on its preceding load. so in those
  // cases we will find the load node at the PARMS offset of the
  // acquire membar.  n.b. there may be an intervening DecodeN node.
  //
  // a volatile load derived from an inlined unsafe field access
  // manifests as a cpuorder membar with Ctl and Mem projections
  // feeding both an acquire membar and a LoadX[mo_acquire]. The
  // acquire then feeds another cpuorder membar via Ctl and Mem
  // projections. The load has no output dependency on these trailing
  // membars because subsequent nodes inserted into the graph take
  // their control feed from the final membar cpuorder meaning they
  // are all ordered after the load.

  Node *x = barrier->lookup(TypeFunc::Parms);
  if (x) {
    // we are starting from an acquire and it has a fake dependency
    //
    // need to check for
    //
    //   LoadX[mo_acquire]
    //   {  |1   }
    //   {DecodeN}
    //      |Parms
    //   MemBarAcquire*
    //
    // where * tags node we were passed
    // and |k means input k
    if (x->is_DecodeNarrowPtr())
      x = x->in(1);

    return (x->is_Load() && x->as_Load()->is_acquire());
  }

  // only continue if we want to try to match unsafe volatile gets
  if (UseBarriersForUnsafeVolatileGet)
    return false;

  // need to check for
  //
  //     MemBarCPUOrder
  //        ||       \\
  //   MemBarAcquire* LoadX[mo_acquire]
  //        ||
  //   MemBarCPUOrder
  //
  // where * tags node we were passed
  // and || or \\ are Ctl+Mem feeds via intermediate Proj Nodes

  // check for a parent MemBarCPUOrder
  ProjNode *ctl;
  ProjNode *mem;
  MemBarNode *parent = has_parent_membar(barrier, ctl, mem);
  if (!parent || parent->Opcode() != Op_MemBarCPUOrder)
    return false;
  // ensure the proj nodes both feed a LoadX[mo_acquire]
  LoadNode *ld = NULL;
  for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
    x = ctl->fast_out(i);
    // if we see a load we keep hold of it and stop searching
    if (x->is_Load()) {
      ld = x->as_Load();
      break;
    }
  }
  // it must be an acquiring load
  if (! ld || ! ld->is_acquire())
    return false;
  for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
    x = mem->fast_out(i);
    // if we see the same load we drop it and stop searching
    if (x == ld) {
      ld = NULL;
      break;
    }
  }
  // we must have dropped the load, i.e. the Mem projection must feed
  // the same load as the Ctl projection does
  if (ld)
    return false;
  // check for a child cpuorder membar
  MemBarNode *child  = has_child_membar(barrier->as_MemBar(), ctl, mem);
  if (!child || child->Opcode() != Op_MemBarCPUOrder)
    return false;

  return true;
}
1144 
1145 bool needs_acquiring_load(const Node *n)
1146 {
1147   // assert n->is_Load();
1148   if (UseBarriersForVolatile)
1149     // we use a normal load and a dmb
1150     return false;
1151 
1152   LoadNode *ld = n->as_Load();
1153 
1154   if (!ld->is_acquire())
1155     return false;
1156 
1157   // check if this load is feeding an acquire membar
1158   //
1159   //   LoadX[mo_acquire]
1160   //   {  |1   }
1161   //   {DecodeN}
1162   //      |Parms
1163   //   MemBarAcquire*
1164   //
1165   // where * tags node we were passed
1166   // and |k means input k
1167 
1168   Node *start = ld;
1169   Node *mbacq = NULL;
1170 
1171   // if we hit a DecodeNarrowPtr we reset the start node and restart
1172   // the search through the outputs
1173  restart:
1174 
1175   for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
1176     Node *x = start->fast_out(i);
1177     if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
1178       mbacq = x;
1179     } else if (!mbacq &&
1180                (x->is_DecodeNarrowPtr() ||
1181                 (x->is_Mach() && x->Opcode() == Op_DecodeN))) {
1182       start = x;
1183       goto restart;
1184     }
1185   }
1186 
1187   if (mbacq) {
1188     return true;
1189   }
1190 
1191   // only continue if we want to try to match unsafe volatile gets
1192   if (UseBarriersForUnsafeVolatileGet)
1193     return false;
1194 
1195   // check if Ctl and Proj feed comes from a MemBarCPUOrder
1196   //
1197   //     MemBarCPUOrder
1198   //        ||       \\
1199   //   MemBarAcquire* LoadX[mo_acquire]
1200   //        ||
1201   //   MemBarCPUOrder
1202 
1203   MemBarNode *membar;
1204   ProjNode *ctl;
1205   ProjNode *mem;
1206 
1207   membar = has_parent_membar(ld, ctl, mem);
1208 
1209   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder)
1210     return false;
1211 
1212   // ensure that there is a CPUOrder->Acquire->CPUOrder membar chain
1213 
1214   membar = has_child_membar(membar, ctl, mem);
1215 
1216   if (!membar || !membar->Opcode() == Op_MemBarAcquire)
1217     return false;
1218 
1219   membar = has_child_membar(membar, ctl, mem);
1220   
1221   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder)
1222     return false;
1223 
1224   return true;
1225 }
1226 
// predicate controlling elision of a MemBarRelease: returns true iff
// the barrier belongs to the volatile-store signature below, in which
// case it can be dropped because the associated store will be planted
// as an stlr<x>
bool unnecessary_release(const Node *n) {
  // assert n->is_MemBar();
  if (UseBarriersForVolatile)
    // we need to plant a dmb
    return false;

  // ok, so we can omit this release barrier if it has been inserted
  // as part of a volatile store sequence
  //
  //   MemBarRelease
  //  {      ||      }
  //  {MemBarCPUOrder} -- optional
  //         ||     \\
  //         ||     StoreX[mo_release]
  //         | \     /
  //         | MergeMem
  //         | /
  //   MemBarVolatile
  //
  // where
  //  || and \\ represent Ctl and Mem feeds via Proj nodes
  //  | \ and / indicate further routing of the Ctl and Mem feeds
  //
  // so we need to check that
  //
  // i) the release membar (or its dependent cpuorder membar) feeds
  // control to a store node (via a Control project node)
  //
  // ii) the store is ordered release
  //
  // iii) the release membar (or its dependent cpuorder membar) feeds
  // control to a volatile membar (via the same Control project node)
  //
  // iv) the release membar feeds memory to a merge mem and to the
  // same store (both via a single Memory proj node)
  //
  // v) the store outputs to the merge mem
  //
  // vi) the merge mem outputs to the same volatile membar
  //
  // n.b. if this is an inlined unsafe node then the release membar
  // may feed its control and memory links via an intervening cpuorder
  // membar. this case can be dealt with when we check the release
  // membar projections. if they both feed a single cpuorder membar
  // node continue to make the same checks as above but with the
  // cpuorder membar substituted for the release membar. if they don't
  // both feed a cpuorder membar then the check fails.
  //
  // n.b.b. for an inlined unsafe store of an object in the case where
  // !TypePtr::NULL_PTR->higher_equal(type(heap_base_oop)) we may see
  // an embedded if then else where we expect the store. this is
  // needed to do the right type of store depending on whether
  // heap_base is NULL. We could check for that but for now we can
  // just take the hit of inserting a redundant dmb for this
  // redundant volatile membar

  MemBarNode *barrier = n->as_MemBar();
  ProjNode *ctl;
  ProjNode *mem;
  // check for an intervening cpuorder membar
  MemBarNode *b = has_child_membar(barrier, ctl, mem);
  if (b && b->Opcode() == Op_MemBarCPUOrder) {
    // ok, so start from the dependent cpuorder barrier
    barrier = b;
  }
  // check the ctl and mem flow
  ctl = barrier->proj_out(TypeFunc::Control);
  mem = barrier->proj_out(TypeFunc::Memory);

  // the barrier needs to have both a Ctl and Mem projection
  if (! ctl || ! mem)
    return false;

  Node *x = NULL;
  Node *mbvol = NULL;
  StoreNode * st = NULL;

  // For a normal volatile write the Ctl ProjNode should have output
  // to a MemBarVolatile and a Store marked as releasing
  //
  // n.b. for an inlined unsafe store of an object in the case where
  // !TypePtr::NULL_PTR->higher_equal(type(heap_base_oop)) we may see
  // an embedded if then else where we expect the store. this is
  // needed to do the right type of store depending on whether
  // heap_base is NULL. We could check for that case too but for now
  // we can just take the hit of inserting a dmb and a non-volatile
  // store to implement the volatile store

  for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
    x = ctl->fast_out(i);
    if (x->is_MemBar() && x->Opcode() == Op_MemBarVolatile) {
      // two volatile membars means this is not the expected signature
      if (mbvol) {
        return false;
      }
      mbvol = x;
    } else if (x->is_Store()) {
      st = x->as_Store();
      if (! st->is_release()) {
        return false;
      }
    } else if (!x->is_Mach()) {
      // we may see mach nodes added during matching but nothing else
      return false;
    }
  }

  if (!mbvol || !st)
    return false;

  // the Mem ProjNode should output to a MergeMem and the same Store
  Node *mm = NULL;
  for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
    x = mem->fast_out(i);
    if (!mm && x->is_MergeMem()) {
      mm = x;
    } else if (x != st && !x->is_Mach()) {
      // we may see mach nodes added during matching but nothing else
      return false;
    }
  }

  if (!mm)
    return false;

  // the MergeMem should output to the MemBarVolatile
  for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
    x = mm->fast_out(i);
    if (x != mbvol && !x->is_Mach()) {
      // we may see mach nodes added during matching but nothing else
      return false;
    }
  }

  return true;
}
1362 
1363 bool unnecessary_volatile(const Node *n) {
1364   // assert n->is_MemBar();
1365   if (UseBarriersForVolatile)
1366     // we need to plant a dmb
1367     return false;
1368 
1369   // ok, so we can omit this volatile barrier if it has been inserted
1370   // as part of a volatile store sequence
1371   //
1372   //   MemBarRelease
1373   //  {      ||      }
1374   //  {MemBarCPUOrder} -- optional
1375   //         ||     \\
1376   //         ||     StoreX[mo_release]
1377   //         | \     /
1378   //         | MergeMem
1379   //         | /
1380   //   MemBarVolatile
1381   //
1382   // where
1383   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1384   //  | \ and / indicate further routing of the Ctl and Mem feeds
1385   // 
1386   // we need to check that
1387   //
1388   // i) the volatile membar gets its control feed from a release
1389   // membar (or its dependent cpuorder membar) via a Control project
1390   // node
1391   //
1392   // ii) the release membar (or its dependent cpuorder membar) also
1393   // feeds control to a store node via the same proj node
1394   //
1395   // iii) the store is ordered release
1396   //
1397   // iv) the release membar (or its dependent cpuorder membar) feeds
1398   // memory to a merge mem and to the same store (both via a single
1399   // Memory proj node)
1400   //
1401   // v) the store outputs to the merge mem
1402   //
1403   // vi) the merge mem outputs to the volatile membar
1404   //
1405   // n.b. for an inlined unsafe store of an object in the case where
1406   // !TypePtr::NULL_PTR->higher_equal(type(heap_base_oop)) we may see
1407   // an embedded if then else where we expect the store. this is
1408   // needed to do the right type of store depending on whether
1409   // heap_base is NULL. We could check for that but for now we can
1410   // just take the hit of on inserting a redundant dmb for this
1411   // redundant volatile membar
1412 
1413   MemBarNode *mbvol = n->as_MemBar();
1414   Node *x = n->lookup(TypeFunc::Control);
1415 
1416   if (! x || !x->is_Proj())
1417     return false;
1418 
1419   ProjNode *proj = x->as_Proj();
1420 
1421   x = proj->lookup(0);
1422 
1423   if (!x || !x->is_MemBar())
1424     return false;
1425 
1426   MemBarNode *barrier = x->as_MemBar();
1427 
1428   // if the barrier is a release membar we have what we want. if it is
1429   // a cpuorder membar then we need to ensure that it is fed by a
1430   // release membar in which case we proceed to check the graph below
1431   // this cpuorder membar as the feed
1432 
1433   if (x->Opcode() != Op_MemBarRelease) {
1434     if (x->Opcode() != Op_MemBarCPUOrder)
1435       return false;
1436     ProjNode *ctl;
1437     ProjNode *mem;
1438     MemBarNode *b = has_parent_membar(x, ctl, mem);
1439     if (!b || !b->Opcode() == Op_MemBarRelease)
1440       return false;
1441   }
1442 
1443   ProjNode *ctl = barrier->proj_out(TypeFunc::Control);
1444   ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
1445 
1446   // barrier needs to have both a Ctl and Mem projection
1447   // and we need to have reached it via the Ctl projection
1448   if (! ctl || ! mem || ctl != proj)
1449     return false;
1450 
1451   StoreNode * st = NULL;
1452 
1453   // The Ctl ProjNode should have output to a MemBarVolatile and
1454   // a Store marked as releasing
1455   for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1456     x = ctl->fast_out(i);
1457     if (x->is_MemBar() && x->Opcode() == Op_MemBarVolatile) {
1458       if (x != mbvol) {
1459         return false;
1460       }
1461     } else if (x->is_Store()) {
1462       st = x->as_Store();
1463       if (! st->is_release()) {
1464         return false;
1465       }
1466     } else if (!x->is_Mach()){
1467       // we may see mach nodes added during matching but nothing else
1468       return false;
1469     }
1470   }
1471 
1472   if (!st)
1473     return false;
1474 
1475   // the Mem ProjNode should output to a MergeMem and the same Store
1476   Node *mm = NULL;
1477   for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1478     x = mem->fast_out(i);
1479     if (!mm && x->is_MergeMem()) {
1480       mm = x;
1481     } else if (x != st && !x->is_Mach()) {
1482       // we may see mach nodes added during matching but nothing else
1483       return false;
1484     }
1485   }
1486 
1487   if (!mm)
1488     return false;
1489 
1490   // the MergeMem should output to the MemBarVolatile
1491   for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
1492     x = mm->fast_out(i);
1493     if (x != mbvol && !x->is_Mach()) {
1494       // we may see mach nodes added during matching but nothing else
1495       return false;
1496     }
1497   }
1498 
1499   return true;
1500 }
1501 
1502 
1503 
1504 bool needs_releasing_store(const Node *n)
1505 {
1506   // assert n->is_Store();
1507   if (UseBarriersForVolatile)
1508     // we use a normal store and dmb combination
1509     return false;
1510 
1511   StoreNode *st = n->as_Store();
1512 
1513   if (!st->is_release())
1514     return false;
1515 
1516   // check if this store is bracketed by a release (or its dependent
1517   // cpuorder membar) and a volatile membar
1518   //
1519   //   MemBarRelease
1520   //  {      ||      }
1521   //  {MemBarCPUOrder} -- optional
1522   //         ||     \\
1523   //         ||     StoreX[mo_release]
1524   //         | \     /
1525   //         | MergeMem
1526   //         | /
1527   //   MemBarVolatile
1528   //
1529   // where
1530   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1531   //  | \ and / indicate further routing of the Ctl and Mem feeds
1532   // 
1533 
1534 
1535   Node *x = st->lookup(TypeFunc::Control);
1536 
1537   if (! x || !x->is_Proj())
1538     return false;
1539 
1540   ProjNode *proj = x->as_Proj();
1541 
1542   x = proj->lookup(0);
1543 
1544   if (!x || !x->is_MemBar())
1545     return false;
1546 
1547   MemBarNode *barrier = x->as_MemBar();
1548 
1549   // if the barrier is a release membar we have what we want. if it is
1550   // a cpuorder membar then we need to ensure that it is fed by a
1551   // release membar in which case we proceed to check the graph below
1552   // this cpuorder membar as the feed
1553 
1554   if (x->Opcode() != Op_MemBarRelease) {
1555     if (x->Opcode() != Op_MemBarCPUOrder)
1556       return false;
1557     Node *ctl = x->lookup(TypeFunc::Control);
1558     Node *mem = x->lookup(TypeFunc::Memory);
1559     if (!ctl || !ctl->is_Proj() || !mem || !mem->is_Proj())
1560       return false;
1561     x = ctl->lookup(0);
1562     if (!x || !x->is_MemBar() || !x->Opcode() == Op_MemBarRelease)
1563       return false;
1564     Node *y = mem->lookup(0);
1565     if (!y || y != x)
1566       return false;
1567   }
1568 
1569   ProjNode *ctl = barrier->proj_out(TypeFunc::Control);
1570   ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
1571 
1572   // MemBarRelease needs to have both a Ctl and Mem projection
1573   // and we need to have reached it via the Ctl projection
1574   if (! ctl || ! mem || ctl != proj)
1575     return false;
1576 
1577   MemBarNode *mbvol = NULL;
1578 
1579   // The Ctl ProjNode should have output to a MemBarVolatile and
1580   // a Store marked as releasing
1581   for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1582     x = ctl->fast_out(i);
1583     if (x->is_MemBar() && x->Opcode() == Op_MemBarVolatile) {
1584       mbvol = x->as_MemBar();
1585     } else if (x->is_Store()) {
1586       if (x != st) {
1587         return false;
1588       }
1589     } else if (!x->is_Mach()){
1590       return false;
1591     }
1592   }
1593 
1594   if (!mbvol)
1595     return false;
1596 
1597   // the Mem ProjNode should output to a MergeMem and the same Store
1598   Node *mm = NULL;
1599   for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1600     x = mem->fast_out(i);
1601     if (!mm && x->is_MergeMem()) {
1602       mm = x;
1603     } else if (x != st && !x->is_Mach()) {
1604       return false;
1605     }
1606   }
1607 
1608   if (!mm)
1609     return false;
1610 
1611   // the MergeMem should output to the MemBarVolatile
1612   for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
1613     x = mm->fast_out(i);
1614     if (x != mbvol && !x->is_Mach()) {
1615       return false;
1616     }
1617   }
1618 
1619   return true;
1620 }
1621 
1622 
1623 
1624 #define __ _masm.
1625 
1626 // advance declarations for helper functions to convert register
1627 // indices to register objects
1628 
1629 // the ad file has to provide implementations of certain methods
1630 // expected by the generic code
1631 //
1632 // REQUIRED FUNCTIONALITY
1633 
1634 //=============================================================================
1635 
1636 // !!!!! Special hack to get all types of calls to specify the byte offset
1637 //       from the start of the call to the point where the return address
1638 //       will point.
1639 
1640 int MachCallStaticJavaNode::ret_addr_offset()
1641 {
1642   // call should be a simple bl
1643   // unless this is a method handle invoke in which case it is
1644   // mov(rfp, sp), bl, mov(sp, rfp)
1645   int off = 4;
1646   if (_method_handle_invoke) {
1647     off += 4;
1648   }
1649   return off;
1650 }
1651 
// return address offset for a dynamic call: four 4-byte instructions
int MachCallDynamicJavaNode::ret_addr_offset()
{
  return 16; // movz, movk, movk, bl
}
1656 
1657 int MachCallRuntimeNode::ret_addr_offset() {
1658   // for generated stubs the call will be
1659   //   far_call(addr)
1660   // for real runtime callouts it will be six instructions
1661   // see aarch64_enc_java_to_runtime
1662   //   adr(rscratch2, retaddr)
1663   //   lea(rscratch1, RuntimeAddress(addr)
1664   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
1665   //   blrt rscratch1
1666   CodeBlob *cb = CodeCache::find_blob(_entry_point);
1667   if (cb) {
1668     return MacroAssembler::far_branch_size();
1669   } else {
1670     return 6 * NativeInstruction::instruction_size;
1671   }
1672 }
1673 
1674 // Indicate if the safepoint node needs the polling page as an input
1675 
1676 // the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
1678 // instruction itself. so we cannot plant a mov of the safepoint poll
1679 // address followed by a load. setting this to true means the mov is
1680 // scheduled as a prior instruction. that's better for scheduling
1681 // anyway.
1682 
bool SafePointNode::needs_polling_address_input()
{
  // see the note above: the poll address must be materialized by a
  // separate prior instruction, so it is supplied as a node input
  return true;
}
1687 
1688 //=============================================================================
1689 
1690 #ifndef PRODUCT
// textual form shown in debug output for a breakpoint node
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
1694 #endif
1695 
// emit a brk instruction to trap to the debugger
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  __ brk(0);
}
1700 
// size in bytes of the emitted code; computed generically
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1704 
1705 //=============================================================================
1706 
1707 #ifndef PRODUCT
  // textual form shown in debug output for a nop padding node
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
1711 #endif
1712 
1713   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
1714     MacroAssembler _masm(&cbuf);
1715     for (int i = 0; i < _count; i++) {
1716       __ nop();
1717     }
1718   }
1719 
  // size in bytes: one instruction per requested nop
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
1723 
1724 //=============================================================================
// the constant base node defines no output register (empty mask)
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
1726 
// offset of the constant table base from the table start
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}
1730 
// no post-register-allocation expansion is needed on aarch64, so the
// expand hook must never be reached
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
1735 
// the node emits no instructions
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}
1739 
// zero bytes: matches the empty encoding above
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}
1743 
1744 #ifndef PRODUCT
// textual form shown in debug output
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
1748 #endif
1749 
1750 #ifndef PRODUCT
// print the prolog instruction sequence; must mirror the frame-size
// cases handled by the emitted prolog
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  if (framesize == 0) {
    // Is this even possible?
    st->print("stp  lr, rfp, [sp, #%d]!", -(2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    // small frame: single sub fits a 9-bit immediate
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
  } else {
    // large frame: materialize the adjustment in a scratch register
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
1771 #endif
1772 
// emit the method prolog: patchable nop, optional stack bang, frame
// build, simulator notification and constant table base setup
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  // bang the stack if the frame may grow past the guard pages
  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  // keep the simulator in step with generated code
  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
1808 
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}
1814 
// the prolog contains no relocatable values
int MachPrologNode::reloc() const
{
  return 0;
}
1819 
1820 //=============================================================================
1821 
#ifndef PRODUCT
// Debug-only description of the epilog: frame teardown followed by an
// optional safepoint-poll load.  Mirrors MachEpilogNode::emit().
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  // Frame size in bytes (frame_slots are 32-bit slots).
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  // Three cases, by frame size: empty frame pops the lr/rfp pair
  // directly; a frame whose size fits a small immediate (9-bit range
  // plus the pair) restores then adds; otherwise the size is
  // materialized in rscratch1 first.
  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  // Method returns touch the polling page for safepoints.
  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
#endif
1847 
// Emit the method epilog: remove the frame, notify the simulator if
// required, and poll the safepoint page on method return.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  // Frame size in bytes (frame_slots are 32-bit slots).
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  // Tell the AArch64 simulator that we are re-entering the caller.
  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  // Return polls only apply to whole-method compilations.
  if (do_polling() && C->is_method_compilation()) {
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
1863 
1864 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
1865   // Variable size. Determine dynamically.
1866   return MachNode::size(ra_);
1867 }
1868 
1869 int MachEpilogNode::reloc() const {
1870   // Return number of relocatable values contained in this instruction.
1871   return 1; // 1 for polling page.
1872 }
1873 
1874 const Pipeline * MachEpilogNode::pipeline() const {
1875   return MachNode::pipeline_class();
1876 }
1877 
// This method seems to be obsolete. It is declared in machnode.hpp
// and defined in all *.ad files, but it is never called. Should we
// get rid of it?
// Returns the byte offset of the safepoint-poll instruction within
// the epilog (one 4-byte instruction in).
int MachEpilogNode::safepoint_offset() const {
  assert(do_polling(), "no return for this epilog node");
  return 4;
}
1885 
1886 //=============================================================================
1887 
1888 // Figure out which register class each belongs in: rc_int, rc_float or
1889 // rc_stack.
1890 enum RC { rc_bad, rc_int, rc_float, rc_stack };
1891 
1892 static enum RC rc_class(OptoReg::Name reg) {
1893 
1894   if (reg == OptoReg::Bad) {
1895     return rc_bad;
1896   }
1897 
1898   // we have 30 int registers * 2 halves
1899   // (rscratch1 and rscratch2 are omitted)
1900 
1901   if (reg < 60) {
1902     return rc_int;
1903   }
1904 
1905   // we have 32 float register * 2 halves
1906   if (reg < 60 + 64) {
1907     return rc_float;
1908   }
1909 
1910   // Between float regs & stack is the flags regs.
1911   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
1912 
1913   return rc_stack;
1914 }
1915 
1916 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
1917   Compile* C = ra_->C;
1918 
1919   // Get registers to move.
1920   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
1921   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
1922   OptoReg::Name dst_hi = ra_->get_reg_second(this);
1923   OptoReg::Name dst_lo = ra_->get_reg_first(this);
1924 
1925   enum RC src_hi_rc = rc_class(src_hi);
1926   enum RC src_lo_rc = rc_class(src_lo);
1927   enum RC dst_hi_rc = rc_class(dst_hi);
1928   enum RC dst_lo_rc = rc_class(dst_lo);
1929 
1930   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
1931 
1932   if (src_hi != OptoReg::Bad) {
1933     assert((src_lo&1)==0 && src_lo+1==src_hi &&
1934            (dst_lo&1)==0 && dst_lo+1==dst_hi,
1935            "expected aligned-adjacent pairs");
1936   }
1937 
1938   if (src_lo == dst_lo && src_hi == dst_hi) {
1939     return 0;            // Self copy, no move.
1940   }
1941 
1942   switch (src_lo_rc) {
1943   case rc_int:
1944     if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
1945       if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
1946           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
1947           // 64 bit
1948         if (cbuf) {
1949           MacroAssembler _masm(cbuf);
1950           __ mov(as_Register(Matcher::_regEncode[dst_lo]),
1951                  as_Register(Matcher::_regEncode[src_lo]));
1952         } else if (st) {
1953           st->print("mov  %s, %s\t# shuffle",
1954                     Matcher::regName[dst_lo],
1955                     Matcher::regName[src_lo]);
1956         }
1957       } else {
1958         // 32 bit
1959         if (cbuf) {
1960           MacroAssembler _masm(cbuf);
1961           __ movw(as_Register(Matcher::_regEncode[dst_lo]),
1962                   as_Register(Matcher::_regEncode[src_lo]));
1963         } else if (st) {
1964           st->print("movw  %s, %s\t# shuffle",
1965                     Matcher::regName[dst_lo],
1966                     Matcher::regName[src_lo]);
1967         }
1968       }
1969     } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
1970       if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
1971           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
1972           // 64 bit
1973         if (cbuf) {
1974           MacroAssembler _masm(cbuf);
1975           __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1976                    as_Register(Matcher::_regEncode[src_lo]));
1977         } else if (st) {
1978           st->print("fmovd  %s, %s\t# shuffle",
1979                     Matcher::regName[dst_lo],
1980                     Matcher::regName[src_lo]);
1981         }
1982       } else {
1983         // 32 bit
1984         if (cbuf) {
1985           MacroAssembler _masm(cbuf);
1986           __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1987                    as_Register(Matcher::_regEncode[src_lo]));
1988         } else if (st) {
1989           st->print("fmovs  %s, %s\t# shuffle",
1990                     Matcher::regName[dst_lo],
1991                     Matcher::regName[src_lo]);
1992         }
1993       }
1994     } else {                    // gpr --> stack spill
1995       assert(dst_lo_rc == rc_stack, "spill to bad register class");
1996       int dst_offset = ra_->reg2offset(dst_lo);
1997       if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
1998           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
1999           // 64 bit
2000         if (cbuf) {
2001           MacroAssembler _masm(cbuf);
2002           __ str(as_Register(Matcher::_regEncode[src_lo]),
2003                  Address(sp, dst_offset));
2004         } else if (st) {
2005           st->print("str  %s, [sp, #%d]\t# spill",
2006                     Matcher::regName[src_lo],
2007                     dst_offset);
2008         }
2009       } else {
2010         // 32 bit
2011         if (cbuf) {
2012           MacroAssembler _masm(cbuf);
2013           __ strw(as_Register(Matcher::_regEncode[src_lo]),
2014                  Address(sp, dst_offset));
2015         } else if (st) {
2016           st->print("strw  %s, [sp, #%d]\t# spill",
2017                     Matcher::regName[src_lo],
2018                     dst_offset);
2019         }
2020       }
2021     }
2022     return 4;
2023   case rc_float:
2024     if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
2025       if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
2026           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
2027           // 64 bit
2028         if (cbuf) {
2029           MacroAssembler _masm(cbuf);
2030           __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
2031                    as_FloatRegister(Matcher::_regEncode[src_lo]));
2032         } else if (st) {
2033           st->print("fmovd  %s, %s\t# shuffle",
2034                     Matcher::regName[dst_lo],
2035                     Matcher::regName[src_lo]);
2036         }
2037       } else {
2038         // 32 bit
2039         if (cbuf) {
2040           MacroAssembler _masm(cbuf);
2041           __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
2042                    as_FloatRegister(Matcher::_regEncode[src_lo]));
2043         } else if (st) {
2044           st->print("fmovs  %s, %s\t# shuffle",
2045                     Matcher::regName[dst_lo],
2046                     Matcher::regName[src_lo]);
2047         }
2048       }
2049     } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
2050       if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
2051           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
2052           // 64 bit
2053         if (cbuf) {
2054           MacroAssembler _masm(cbuf);
2055           __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2056                    as_FloatRegister(Matcher::_regEncode[src_lo]));
2057         } else if (st) {
2058           st->print("fmovd  %s, %s\t# shuffle",
2059                     Matcher::regName[dst_lo],
2060                     Matcher::regName[src_lo]);
2061         }
2062       } else {
2063         // 32 bit
2064         if (cbuf) {
2065           MacroAssembler _masm(cbuf);
2066           __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2067                    as_FloatRegister(Matcher::_regEncode[src_lo]));
2068         } else if (st) {
2069           st->print("fmovs  %s, %s\t# shuffle",
2070                     Matcher::regName[dst_lo],
2071                     Matcher::regName[src_lo]);
2072         }
2073       }
2074     } else {                    // fpr --> stack spill
2075       assert(dst_lo_rc == rc_stack, "spill to bad register class");
2076       int dst_offset = ra_->reg2offset(dst_lo);
2077       if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
2078           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
2079           // 64 bit
2080         if (cbuf) {
2081           MacroAssembler _masm(cbuf);
2082           __ strd(as_FloatRegister(Matcher::_regEncode[src_lo]),
2083                  Address(sp, dst_offset));
2084         } else if (st) {
2085           st->print("strd  %s, [sp, #%d]\t# spill",
2086                     Matcher::regName[src_lo],
2087                     dst_offset);
2088         }
2089       } else {
2090         // 32 bit
2091         if (cbuf) {
2092           MacroAssembler _masm(cbuf);
2093           __ strs(as_FloatRegister(Matcher::_regEncode[src_lo]),
2094                  Address(sp, dst_offset));
2095         } else if (st) {
2096           st->print("strs  %s, [sp, #%d]\t# spill",
2097                     Matcher::regName[src_lo],
2098                     dst_offset);
2099         }
2100       }
2101     }
2102     return 4;
2103   case rc_stack:
2104     int src_offset = ra_->reg2offset(src_lo);
2105     if (dst_lo_rc == rc_int) {  // stack --> gpr load
2106       if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
2107           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
2108           // 64 bit
2109         if (cbuf) {
2110           MacroAssembler _masm(cbuf);
2111           __ ldr(as_Register(Matcher::_regEncode[dst_lo]),
2112                  Address(sp, src_offset));
2113         } else if (st) {
2114           st->print("ldr  %s, [sp, %d]\t# restore",
2115                     Matcher::regName[dst_lo],
2116                     src_offset);
2117         }
2118       } else {
2119         // 32 bit
2120         if (cbuf) {
2121           MacroAssembler _masm(cbuf);
2122           __ ldrw(as_Register(Matcher::_regEncode[dst_lo]),
2123                   Address(sp, src_offset));
2124         } else if (st) {
2125           st->print("ldr  %s, [sp, %d]\t# restore",
2126                     Matcher::regName[dst_lo],
2127                    src_offset);
2128         }
2129       }
2130       return 4;
2131     } else if (dst_lo_rc == rc_float) { // stack --> fpr load
2132       if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
2133           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
2134           // 64 bit
2135         if (cbuf) {
2136           MacroAssembler _masm(cbuf);
2137           __ ldrd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2138                  Address(sp, src_offset));
2139         } else if (st) {
2140           st->print("ldrd  %s, [sp, %d]\t# restore",
2141                     Matcher::regName[dst_lo],
2142                     src_offset);
2143         }
2144       } else {
2145         // 32 bit
2146         if (cbuf) {
2147           MacroAssembler _masm(cbuf);
2148           __ ldrs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2149                   Address(sp, src_offset));
2150         } else if (st) {
2151           st->print("ldrs  %s, [sp, %d]\t# restore",
2152                     Matcher::regName[dst_lo],
2153                    src_offset);
2154         }
2155       }
2156       return 4;
2157     } else {                    // stack --> stack copy
2158       assert(dst_lo_rc == rc_stack, "spill to bad register class");
2159       int dst_offset = ra_->reg2offset(dst_lo);
2160       if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
2161           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
2162           // 64 bit
2163         if (cbuf) {
2164           MacroAssembler _masm(cbuf);
2165           __ ldr(rscratch1, Address(sp, src_offset));
2166           __ str(rscratch1, Address(sp, dst_offset));
2167         } else if (st) {
2168           st->print("ldr  rscratch1, [sp, %d]\t# mem-mem spill",
2169                     src_offset);
2170           st->print("\n\t");
2171           st->print("str  rscratch1, [sp, %d]",
2172                     dst_offset);
2173         }
2174       } else {
2175         // 32 bit
2176         if (cbuf) {
2177           MacroAssembler _masm(cbuf);
2178           __ ldrw(rscratch1, Address(sp, src_offset));
2179           __ strw(rscratch1, Address(sp, dst_offset));
2180         } else if (st) {
2181           st->print("ldrw  rscratch1, [sp, %d]\t# mem-mem spill",
2182                     src_offset);
2183           st->print("\n\t");
2184           st->print("strw  rscratch1, [sp, %d]",
2185                     dst_offset);
2186         }
2187       }
2188       return 8;
2189     }
2190   }
2191 
2192   assert(false," bad rc_class for spill ");
2193   Unimplemented();
2194   return 0;
2195 
2196 }
2197 
2198 #ifndef PRODUCT
2199 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
2200   if (!ra_)
2201     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
2202   else
2203     implementation(NULL, ra_, false, st);
2204 }
2205 #endif
2206 
// Emit the spill/restore/shuffle code by running the shared
// implementation in emitting mode.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}
2210 
// Size in bytes of the copy, computed by the shared implementation
// without emitting any code.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation(NULL, ra_, true, NULL);
}
2214 
2215 //=============================================================================
2216 
2217 #ifndef PRODUCT
2218 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
2219   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
2220   int reg = ra_->get_reg_first(this);
2221   st->print("add %s, rsp, #%d]\t# box lock",
2222             Matcher::regName[reg], offset);
2223 }
2224 #endif
2225 
// Materialize the address of the box lock stack slot into the
// destination register: dst = sp + offset.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  // NOTE(review): offsets outside the add/sub immediate range are not
  // handled -- presumably box-lock slots always sit low enough in the
  // frame for a single add; confirm for very large frames.
  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    ShouldNotReachHere();
  }
}
2238 
2239 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
2240   // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
2241   return 4;
2242 }
2243 
2244 //=============================================================================
2245 
2246 #ifndef PRODUCT
2247 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
2248 {
2249   st->print_cr("# MachUEPNode");
2250   if (UseCompressedClassPointers) {
2251     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
2252     if (Universe::narrow_klass_shift() != 0) {
2253       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
2254     }
2255   } else {
2256    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
2257   }
2258   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
2259   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
2260 }
2261 #endif
2262 
// Emit the unverified entry point: verify the receiver klass against
// the inline cache and jump to the IC miss stub on mismatch.
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  // Compare the klass of the receiver (j_rarg0) with the expected
  // klass; rscratch1 is used as a temporary -- see
  // MacroAssembler::cmp_klass for the exact contract.
  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  // far_jump because the miss stub may be out of branch range.
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
2276 
2277 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
2278 {
2279   return MachNode::size(ra_);
2280 }
2281 
2282 // REQUIRED EMIT CODE
2283 
2284 //=============================================================================
2285 
// Emit exception handler code.
// Returns the offset of the handler within the stub section, or 0 if
// the code buffer could not be expanded.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_exception_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();
  // far_jump reaches the exception blob at any distance.
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2303 
// Emit deopt handler code.
// Returns the offset of the handler within the stub section, or 0 if
// the code buffer could not be expanded.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_deopt_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();

  // Capture the current pc in lr before jumping -- presumably the
  // unpack blob uses lr to identify the deopt site; see
  // SharedRuntime::deopt_blob to confirm.
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2322 
2323 // REQUIRED MATCHER CODE
2324 
2325 //=============================================================================
2326 
2327 const bool Matcher::match_rule_supported(int opcode) {
2328 
2329   // TODO
2330   // identify extra cases that we might want to provide match rules for
2331   // e.g. Op_StrEquals and other intrinsics
2332   if (!has_match_rule(opcode)) {
2333     return false;
2334   }
2335 
2336   return true;  // Per default match rules are supported.
2337 }
2338 
2339 int Matcher::regnum_to_fpu_offset(int regnum)
2340 {
2341   Unimplemented();
2342   return 0;
2343 }
2344 
2345 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset)
2346 {
2347   Unimplemented();
2348   return false;
2349 }
2350 
2351 const bool Matcher::isSimpleConstant64(jlong value) {
2352   // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
2353   // Probably always true, even if a temp register is required.
2354   return true;
2355 }
2356 
2357 // true just means we have fast l2f conversion
2358 const bool Matcher::convL2FSupported(void) {
2359   return true;
2360 }
2361 
2362 // Vector width in bytes.
2363 const int Matcher::vector_width_in_bytes(BasicType bt) {
2364   // TODO fixme
2365   return 0;
2366 }
2367 
2368 // Limits on vector size (number of elements) loaded into vector.
2369 const int Matcher::max_vector_size(const BasicType bt) {
2370   return vector_width_in_bytes(bt)/type2aelembytes(bt);
2371 }
2372 const int Matcher::min_vector_size(const BasicType bt) {
2373   int max_size = max_vector_size(bt);
2374   // Min size which can be loaded into vector is 4 bytes.
2375   int size = (type2aelembytes(bt) == 1) ? 4 : 2;
2376   return MIN2(size,max_size);
2377 }
2378 
2379 // Vector ideal reg.
2380 const int Matcher::vector_ideal_reg(int len) {
2381   // TODO fixme
2382   return Op_RegD;
2383 }
2384 
2385 // Only lowest bits of xmm reg are used for vector shift count.
2386 const int Matcher::vector_shift_count_ideal_reg(int size) {
2387   // TODO fixme
2388   return Op_RegL;
2389 }
2390 
2391 // AES support not yet implemented
2392 const bool Matcher::pass_original_key_for_aes() {
2393   return false;
2394 }
2395 
// Does the target support misaligned vector store/load?
// (The original comment said "x86 supports..." -- inherited from the
// x86 AD file.)  Moot while this port does not vectorize.
const bool Matcher::misaligned_vectors_ok() {
  // TODO fixme
  // return !AlignVector; // can be changed by flag
  return false;
}
2402 
// ClearArray count input unit:
// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Threshold size for cleararray: arrays at or below 18 longs are
// cleared inline rather than via a runtime call.
const int Matcher::init_array_short_size = 18 * BytesPerLong;
2408 
2409 // Use conditional move (CMOVL)
2410 const int Matcher::long_cmove_cost() {
2411   // long cmoves are no more expensive than int cmoves
2412   return 0;
2413 }
2414 
2415 const int Matcher::float_cmove_cost() {
2416   // float cmoves are no more expensive than int cmoves
2417   return 0;
2418 }
2419 
// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?  True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// A64 variable shifts use only the low bits of the count register,
// so no explicit masking is required.
const bool Matcher::need_masked_shift_count = false;
2431 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Folding the decode into addressing only works when no shift is
  // needed to widen the narrow oop.
  return Universe::narrow_oop_shift() == 0;
}
2445 
2446 bool Matcher::narrow_klass_use_complex_address() {
2447 // TODO
2448 // decide whether we need to set this to true
2449   return false;
2450 }
2451 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
// AArch64 handles misaligned double accesses directly.
const bool Matcher::misaligned_doubles_ok = true;
2464 
// Not expected to be called on this port (the old "No-op on amd64"
// comment was inherited from the x86 AD file); aborts if reached.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
2469 
// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return true; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
2483 
2484 // Return whether or not this register is ever used as an argument.
2485 // This function is used on startup to build the trampoline stubs in
2486 // generateOptoStub.  Registers not mentioned will be killed by the VM
2487 // call in the trampoline, and arguments in those registers not be
2488 // available to the callee.
2489 bool Matcher::can_be_java_arg(int reg)
2490 {
2491   return
2492     reg ==  R0_num || reg == R0_H_num ||
2493     reg ==  R1_num || reg == R1_H_num ||
2494     reg ==  R2_num || reg == R2_H_num ||
2495     reg ==  R3_num || reg == R3_H_num ||
2496     reg ==  R4_num || reg == R4_H_num ||
2497     reg ==  R5_num || reg == R5_H_num ||
2498     reg ==  R6_num || reg == R6_H_num ||
2499     reg ==  R7_num || reg == R7_H_num ||
2500     reg ==  V0_num || reg == V0_H_num ||
2501     reg ==  V1_num || reg == V1_H_num ||
2502     reg ==  V2_num || reg == V2_H_num ||
2503     reg ==  V3_num || reg == V3_H_num ||
2504     reg ==  V4_num || reg == V4_H_num ||
2505     reg ==  V5_num || reg == V5_H_num ||
2506     reg ==  V6_num || reg == V6_H_num ||
2507     reg ==  V7_num || reg == V7_H_num;
2508 }
2509 
2510 bool Matcher::is_spillable_arg(int reg)
2511 {
2512   return can_be_java_arg(reg);
2513 }
2514 
2515 bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
2516   return false;
2517 }
2518 
2519 RegMask Matcher::divI_proj_mask() {
2520   ShouldNotReachHere();
2521   return RegMask();
2522 }
2523 
2524 // Register for MODI projection of divmodI.
2525 RegMask Matcher::modI_proj_mask() {
2526   ShouldNotReachHere();
2527   return RegMask();
2528 }
2529 
2530 // Register for DIVL projection of divmodL.
2531 RegMask Matcher::divL_proj_mask() {
2532   ShouldNotReachHere();
2533   return RegMask();
2534 }
2535 
2536 // Register for MODL projection of divmodL.
2537 RegMask Matcher::modL_proj_mask() {
2538   ShouldNotReachHere();
2539   return RegMask();
2540 }
2541 
2542 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
2543   return FP_REG_mask();
2544 }
2545 
2546 // helper for encoding java_to_runtime calls on sim
2547 //
2548 // this is needed to compute the extra arguments required when
2549 // planting a call to the simulator blrt instruction. the TypeFunc
2550 // can be queried to identify the counts for integral, and floating
2551 // arguments and the return type
2552 
// Count the integral (gpcnt) and floating point (fpcnt) arguments of
// tf and classify its return type (rtype) for the simulator's blrt
// encoding.
static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
{
  int gps = 0;
  int fps = 0;
  const TypeTuple *domain = tf->domain();
  int max = domain->cnt();
  for (int i = TypeFunc::Parms; i < max; i++) {
    const Type *t = domain->field_at(i);
    switch(t->basic_type()) {
    case T_FLOAT:
    case T_DOUBLE:
      fps++;
      // NOTE(review): no break here, so float/double args also bump
      // gps, and the default arm counts T_VOID halves of longs and
      // doubles too.  This looks like the counts are deliberately
      // loose upper bounds for the simulator -- confirm against
      // MacroAssembler::blrt before "fixing".
    default:
      gps++;
    }
  }
  gpcnt = gps;
  fpcnt = fps;
  // Classify the return type.  (The default arm sits in the middle of
  // the switch; that is legal, if unusual.)
  BasicType rt = tf->return_type();
  switch (rt) {
  case T_VOID:
    rtype = MacroAssembler::ret_type_void;
    break;
  default:
    rtype = MacroAssembler::ret_type_integral;
    break;
  case T_FLOAT:
    rtype = MacroAssembler::ret_type_float;
    break;
  case T_DOUBLE:
    rtype = MacroAssembler::ret_type_double;
    break;
  }
}
2587 
// Emit a volatile load/store.  Volatile accesses only accept a plain
// base-register address -- presumably because the acquire/release
// instruction forms take no index/scale/displacement; confirm against
// the INSN implementations in MacroAssembler.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }

// Member-function-pointer types for the integer and floating point
// load/store emitters passed to loadStore() below.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
2599 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  static void loadStore(MacroAssembler masm, mem_insn insn,
                         Register reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDOFFSETI2L:
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDOFFSETI2LN:
    case INDINDEXSCALEDI2LN:
    case INDINDEXOFFSETI2L:
    case INDINDEXOFFSETI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    // index == -1 means no index register: simple base+disp address.
    if (index == -1) {
      (masm.*insn)(reg, Address(base, disp));
    } else {
      if (disp == 0) {
        (masm.*insn)(reg, Address(base, as_Register(index), scale));
      } else {
        // base+index+disp is not encodable in one address; fold
        // base+disp into rscratch1 first.
        masm.lea(rscratch1, Address(base, disp));
        (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
      }
    }
  }
2636 
  // Floating point variant of loadStore() above; same addressing
  // logic with a FloatRegister target.
  // NOTE(review): the non-scaled INDINDEXOFFSETI2L/I2LN cases present
  // in the integer variant are absent here -- presumably those
  // patterns never feed float accesses; confirm against the memory
  // operand definitions.
  static void loadStore(MacroAssembler masm, mem_float_insn insn,
                         FloatRegister reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDOFFSETI2L:
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDOFFSETI2LN:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

     if (index == -1) {
      (masm.*insn)(reg, Address(base, disp));
    } else {
      if (disp == 0) {
        (masm.*insn)(reg, Address(base, as_Register(index), scale));
      } else {
        // Fold base+disp into rscratch1, then apply the index.
        masm.lea(rscratch1, Address(base, disp));
        (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
      }
    }
  }
2665 
2666 %}
2667 
2668 
2669 
2670 //----------ENCODING BLOCK-----------------------------------------------------
2671 // This block specifies the encoding classes used by the compiler to
2672 // output byte streams.  Encoding classes are parameterized macros
2673 // used by Machine Instruction Nodes in order to generate the bit
2674 // encoding of the instruction.  Operands specify their base encoding
2675 // interface with the interface keyword.  There are currently
2676 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
2677 // COND_INTER.  REG_INTER causes an operand to generate a function
2678 // which returns its register number when queried.  CONST_INTER causes
2679 // an operand to generate a function which returns the value of the
2680 // constant when queried.  MEMORY_INTER causes an operand to generate
2681 // four functions which return the Base Register, the Index Register,
2682 // the Scale Value, and the Offset Value of the operand when queried.
2683 // COND_INTER causes an operand to generate six functions which return
2684 // the encoding code (ie - encoding bits for the instruction)
2685 // associated with each basic boolean condition for a conditional
2686 // instruction.
2687 //
2688 // Instructions specify two basic values for encoding.  Again, a
2689 // function is available to check if the constant displacement is an
2690 // oop. They use the ins_encode keyword to specify their encoding
2691 // classes (which must be a sequence of enc_class names, and their
2692 // parameters, specified in the encoding block), and they use the
2693 // opcode keyword to specify, in order, their primary, secondary, and
2694 // tertiary opcode.  Only the opcode sections which a particular
2695 // instruction needs for encoding need to be specified.
2696 encode %{
2697   // Build emit functions for each basic byte or larger field in the
2698   // intel encoding scheme (opcode, rm, sib, immediate), and call them
2699   // from C++ code in the enc_class source block.  Emit functions will
2700   // live in the main source block for now.  In future, we can
2701   // generalize this by adding a syntax that specifies the sizes of
2702   // fields in an order, so that the adlc can build the emit functions
2703   // automagically
2704 
  // Catch-all for instruct rules whose encoding has not been written
  // yet: stops the VM with an "unimplemented" message at runtime.
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
2710 
2711   // BEGIN Non-volatile memory access
2712 
  // Non-volatile loads.  Each encoding forwards to the shared loadStore
  // helper, which selects the addressing mode from the memory operand's
  // opcode and may clobber rscratch1 when both an index register and a
  // displacement are present.  Where two enc_classes share a name the
  // ADLC distinguishes them by their operand types (e.g. iRegI vs. iRegL).
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Float/double loads go through the FloatRegister overload of loadStore.
  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Non-volatile stores.  The *0 variants store the zero register and
  // therefore need no source operand.
  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64, so
    // bounce the value through rscratch2 first (clobbering rscratch2)
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2864 
2865   // END Non-volatile memory access
2866 
2867   // volatile loads and stores
2868 
  // Volatile (ordered) accesses: store-release (stlr*) and
  // load-acquire (ldar*).  MOV_VOLATILE is a macro defined elsewhere in
  // this file; it resolves the memory operand to a single address
  // (using rscratch1 as scratch) and emits the named instruction.
  // NOTE(review): the bare __ calls after MOV_VOLATILE below rely on a
  // _masm introduced inside that macro -- confirm at its definition.
  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}


  // There is no sign-extending form of ldar, so sub-word signed loads
  // are done as a zero-extending ldarb/ldarh followed by an explicit
  // sign extension.
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // Acquiring float/double loads: ldar only targets integer registers,
  // so load into rscratch1 and fmov the bits across.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64, so
    // copy sp into rscratch2 and store that instead
    if (src_reg == r31_sp) {
        MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // Releasing float/double stores bounce the bits through rscratch2.
  // The inner braces limit the lifetime of this local _masm so it ends
  // before MOV_VOLATILE runs.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
2993 
2994   // synchronized read/update encodings
2995 
  // Load-acquire-exclusive of a 64-bit value.  ldaxr only takes a bare
  // base register, so the memory operand is first reduced to a single
  // address register, clobbering rscratch1 whenever a displacement or
  // index has to be folded in.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
3024 
  // Store-release-exclusive of a 64-bit value.  The store-exclusive
  // status (0 = success) lands in rscratch1; the trailing cmpw turns it
  // into condition flags (EQ = success) for the matching instruct rule.
  // rscratch2 is clobbered when the address must be materialized.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    __ cmpw(rscratch1, zr);
  %}
3054 
  // Compare-and-swap of a 64-bit value: materialize the address (may
  // clobber rscratch2), then run a ldxr/stlxr retry loop.  On exit the
  // flags hold EQ iff the swap succeeded (cbnzw does not modify flags),
  // which aarch64_enc_cset_eq can convert to a 0/1 result.
  // Clobbers rscratch1 (and rscratch2 when the address is complex).
  // NOTE(review): the load side is a plain ldxr with no acquire
  // semantics -- confirm the intended memory ordering for this CAS.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    Register old_reg = as_Register($oldval$$reg);
    Register new_reg = as_Register($newval$$reg);
    Register base = as_Register($mem$$base);
    Register addr_reg;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        addr_reg = rscratch2;
      } else {
        // TODO
        // should we ever get anything other than this case?
        addr_reg = base;
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        addr_reg = rscratch2;
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        addr_reg = rscratch2;
      }
    }
    Label retry_load, done;
    __ bind(retry_load);
    __ ldxr(rscratch1, addr_reg);
    __ cmp(rscratch1, old_reg);
    __ br(Assembler::NE, done);
    __ stlxr(rscratch1, new_reg, addr_reg);
    __ cbnzw(rscratch1, retry_load);
    __ bind(done);
  %}
3093 
  // 32-bit variant of aarch64_enc_cmpxchg: same address materialization
  // and ldxrw/stlxrw retry loop; leaves EQ set iff the swap succeeded.
  // Clobbers rscratch1 (and rscratch2 when the address is complex).
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    Register old_reg = as_Register($oldval$$reg);
    Register new_reg = as_Register($newval$$reg);
    Register base = as_Register($mem$$base);
    Register addr_reg;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        addr_reg = rscratch2;
      } else {
        // TODO
        // should we ever get anything other than this case?
        addr_reg = base;
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        addr_reg = rscratch2;
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        addr_reg = rscratch2;
      }
    }
    Label retry_load, done;
    __ bind(retry_load);
    __ ldxrw(rscratch1, addr_reg);
    __ cmpw(rscratch1, old_reg);
    __ br(Assembler::NE, done);
    __ stlxrw(rscratch1, new_reg, addr_reg);
    __ cbnzw(rscratch1, retry_load);
    __ bind(done);
  %}
3132 
3133   // auxiliary used for CompareAndSwapX to set result register
3134   enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
3135     MacroAssembler _masm(&cbuf);
3136     Register res_reg = as_Register($res$$reg);
3137     __ cset(res_reg, Assembler::EQ);
3138   %}
3139 
3140   // prefetch encodings
3141 
  // Prefetch-for-store (PSTL1KEEP: keep in L1) of the memory operand's
  // address.  Clobbers rscratch1 when the address is index+displacement.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
      // NOTE(review): the purpose of this nop is not evident from this
      // file -- possibly size padding for the rule; confirm.
      __ nop();
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
3161 
  // Zero a region of cnt words starting at base, using an 8-way
  // unrolled store loop entered in the middle (Duff's-device style) to
  // handle the remainder words.  Clobbers cnt_reg, base_reg, rscratch1
  // and rscratch2.
  enc_class aarch64_enc_clear_array_reg_reg(iRegL_R11 cnt, iRegP_R10 base) %{
    MacroAssembler _masm(&cbuf);
    Register cnt_reg = as_Register($cnt$$reg);
    Register base_reg = as_Register($base$$reg);
    // base is word aligned
    // cnt is count of words

    Label loop;
    Label entry;

//  Algorithm:
//
//    scratch1 = cnt & 7;
//    cnt -= scratch1;
//    p += scratch1;
//    switch (scratch1) {
//      do {
//        cnt -= 8;
//          p[-8] = 0;
//        case 7:
//          p[-7] = 0;
//        case 6:
//          p[-6] = 0;
//          // ...
//        case 1:
//          p[-1] = 0;
//        case 0:
//          p += 8;
//      } while (cnt);
//    }

    const int unroll = 8; // Number of str(zr) instructions we'll unroll

    __ andr(rscratch1, cnt_reg, unroll - 1);  // tmp1 = cnt % unroll
    __ sub(cnt_reg, cnt_reg, rscratch1);      // cnt -= (cnt % unroll)
    // base_reg always points to the end of the region we're about to zero
    __ add(base_reg, base_reg, rscratch1, Assembler::LSL, exact_log2(wordSize));
    // Jump into the unrolled str sequence: back up one 4-byte
    // instruction from 'entry' per remainder word (hence LSL 2), so
    // exactly that many stores execute on the first pass.
    __ adr(rscratch2, entry);
    __ sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 2);
    __ br(rscratch2);
    __ bind(loop);
    __ sub(cnt_reg, cnt_reg, unroll);
    for (int i = -unroll; i < 0; i++)
      __ str(zr, Address(base_reg, i * wordSize));
    __ bind(entry);
    __ add(base_reg, base_reg, unroll * wordSize);
    __ cbnz(cnt_reg, loop);
  %}
3210 
  // mov encodings
3212 
3213   enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
3214     MacroAssembler _masm(&cbuf);
3215     u_int32_t con = (u_int32_t)$src$$constant;
3216     Register dst_reg = as_Register($dst$$reg);
3217     if (con == 0) {
3218       __ movw(dst_reg, zr);
3219     } else {
3220       __ movw(dst_reg, con);
3221     }
3222   %}
3223 
3224   enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
3225     MacroAssembler _masm(&cbuf);
3226     Register dst_reg = as_Register($dst$$reg);
3227     u_int64_t con = (u_int64_t)$src$$constant;
3228     if (con == 0) {
3229       __ mov(dst_reg, zr);
3230     } else {
3231       __ mov(dst_reg, con);
3232     }
3233   %}
3234 
  // Materialize a pointer constant.  Oop and metadata constants carry
  // the matching relocation; other addresses below the VM page size are
  // moved as plain immediates, while larger ones are formed
  // PC-relatively with adrp + add.  NULL and the special value 1 are
  // handled by the immP0/immP_1 encodings below and must not get here.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
3259 
3260   enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
3261     MacroAssembler _masm(&cbuf);
3262     Register dst_reg = as_Register($dst$$reg);
3263     __ mov(dst_reg, zr);
3264   %}
3265 
3266   enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
3267     MacroAssembler _masm(&cbuf);
3268     Register dst_reg = as_Register($dst$$reg);
3269     __ mov(dst_reg, (u_int64_t)1);
3270   %}
3271 
  // Load the address of the safepoint polling page with a single adrp
  // carrying a poll_type relocation.  The assert requires the page to
  // be aligned so the adrp-page offset is zero.
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}
3280 
  // Load the byte map base constant (see the immByteMapBase operand)
  // with a single adrp; the assert requires it to be page-aligned.
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, ExternalAddress(page), off);
    assert(off == 0, "assumed offset == 0");
  %}
3289 
  // Materialize a narrow-oop constant; always carries an oop
  // relocation.  A null narrow oop is handled by immN0 below and must
  // not get here.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}
3302 
3303   enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
3304     MacroAssembler _masm(&cbuf);
3305     Register dst_reg = as_Register($dst$$reg);
3306     __ mov(dst_reg, zr);
3307   %}
3308 
  // Materialize a narrow-klass constant; always carries a metadata
  // relocation.  A null constant is not expected here.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
3321 
3322   // arithmetic encodings
3323 
  // Add or subtract a 32-bit add/subtract-range immediate.  One
  // encoding serves both operations: $primary == 0 encodes add,
  // $primary == 1 encodes subtract (the constant is negated).  Because
  // the hardware immediate is unsigned, a negative effective constant
  // is emitted as the opposite operation on its absolute value.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}
3337 
  // 64-bit counterpart of aarch64_enc_addsubw_imm above.
  // NOTE(review): the long constant is read as int32_t; this relies on
  // the immLAddSub operand restricting values to a 32-bit-safe range --
  // confirm against the operand definition.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
3351 
3352   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
3353     MacroAssembler _masm(&cbuf);
3354    Register dst_reg = as_Register($dst$$reg);
3355    Register src1_reg = as_Register($src1$$reg);
3356    Register src2_reg = as_Register($src2$$reg);
3357     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
3358   %}
3359 
3360   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
3361     MacroAssembler _masm(&cbuf);
3362    Register dst_reg = as_Register($dst$$reg);
3363    Register src1_reg = as_Register($src1$$reg);
3364    Register src2_reg = as_Register($src2$$reg);
3365     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
3366   %}
3367 
3368   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
3369     MacroAssembler _masm(&cbuf);
3370    Register dst_reg = as_Register($dst$$reg);
3371    Register src1_reg = as_Register($src1$$reg);
3372    Register src2_reg = as_Register($src2$$reg);
3373     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
3374   %}
3375 
3376   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
3377     MacroAssembler _masm(&cbuf);
3378    Register dst_reg = as_Register($dst$$reg);
3379    Register src1_reg = as_Register($src1$$reg);
3380    Register src2_reg = as_Register($src2$$reg);
3381     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
3382   %}
3383 
  // Compare instruction encodings.  All of these only set the condition
  // flags; the general-immediate forms clobber rscratch1.

  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare against an add/subtract-range immediate: emit
  // subs/adds into the zero register so only the flags survive.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare against an arbitrary immediate: materialize it in
  // rscratch1 first.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int32_t val = (u_int32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare against an add/subtract-range immediate.
  // Long.MIN_VALUE is its own negation, so it cannot take the adds
  // path and is materialized in rscratch1 instead.
  // NOTE(review): computing -val when val == Long.MIN_VALUE is formally
  // signed-overflow UB in C++ even though the val != -val guard
  // discards the result -- confirm this is acceptable here.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
      // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int64_t val = (u_int64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}

  // Pointer compare (full 64-bit).
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // Narrow-oop compare (32-bit).
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // Null test of a pointer.
  enc_class aarch64_enc_testp(iRegP src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // Null test of a narrow oop.
  enc_class aarch64_enc_testn(iRegN src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
3467 
  // Unconditional branch to a label.
  enc_class aarch64_enc_b(label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // Conditional branch; the condition comes from the cmpOp operand's
  // encoding.
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Same emission as above; the cmpOpU operand presumably supplies
  // unsigned condition codes -- see its definition.
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
3485 
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     // Slow-path subtype check via check_klass_subtype_slow_path; the
     // success continuation falls through (succeed label is NULL) and
     // failure branches to the local miss label.
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     // $primary presumably distinguishes the instruct variants sharing
     // this encoding: when set, zero the result register on success.
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
3503 
  enc_class aarch64_enc_java_static_call(method meth) %{
    // Direct Java call.  Relocation type depends on the call site:
    // runtime wrapper (no _method), optimized virtual, or plain static.
    // trampoline_call provides reach across the whole code cache.
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else if (_optimized_virtual) {
      __ trampoline_call(Address(addr, relocInfo::opt_virtual_call_type), &cbuf);
    } else {
      __ trampoline_call(Address(addr, relocInfo::static_call_type), &cbuf);
    }

    if (_method) {
      // Emit stub for static call
      CompiledStaticCall::emit_to_interp_stub(cbuf);
    }
  %}
3522 
3523   enc_class aarch64_enc_java_handle_call(method meth) %{
3524     MacroAssembler _masm(&cbuf);
3525     relocInfo::relocType reloc;
3526 
3527     // RFP is preserved across all calls, even compiled calls.
3528     // Use it to preserve SP.
3529     __ mov(rfp, sp);
3530 
3531     const int start_offset = __ offset();
3532     address addr = (address)$meth$$method;
3533     if (!_method) {
3534       // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
3535       __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
3536     } else if (_optimized_virtual) {
3537       __ trampoline_call(Address(addr, relocInfo::opt_virtual_call_type), &cbuf);
3538     } else {
3539       __ trampoline_call(Address(addr, relocInfo::static_call_type), &cbuf);
3540     }
3541 
3542     if (_method) {
3543       // Emit stub for static call
3544       CompiledStaticCall::emit_to_interp_stub(cbuf);
3545     }
3546 
3547     // now restore sp
3548     __ mov(sp, rfp);
3549   %}
3550 
3551   enc_class aarch64_enc_java_dynamic_call(method meth) %{
3552     MacroAssembler _masm(&cbuf);
3553     __ ic_call((address)$meth$$method);
3554   %}
3555 
  enc_class aarch64_enc_call_epilog() %{
    // Post-call hook: only emits code under VerifyStackAtCalls, and
    // the check is deliberately unimplemented on AArch64 so far.
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
3563 
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target lives in the code cache: a trampoline call reaches it.
      __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
    } else {
      // Native entry outside the code cache: call through blrt with
      // argument counts/return type derived from the call's signature.
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaThread::pd_last_frame().
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      // Pop the breadcrumb pair pushed above.
      __ add(sp, sp, 2 * wordSize);
    }
  %}
3590 
3591   enc_class aarch64_enc_rethrow() %{
3592     MacroAssembler _masm(&cbuf);
3593     __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
3594   %}
3595 
  enc_class aarch64_enc_ret() %{
    // Plain method return: branch to the link register.
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}
3600 
3601   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
3602     MacroAssembler _masm(&cbuf);
3603     Register target_reg = as_Register($jump_target$$reg);
3604     __ br(target_reg);
3605   %}
3606 
3607   enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
3608     MacroAssembler _masm(&cbuf);
3609     Register target_reg = as_Register($jump_target$$reg);
3610     // exception oop should be in r0
3611     // ret addr has been popped into lr
3612     // callee expects it in r3
3613     __ mov(r3, lr);
3614     __ br(target_reg);
3615   %}
3616 
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    // Inline monitor-enter fast path.  On exit the condition flags
    // report the outcome to the matching instruct: EQ = lock acquired,
    // NE = fall through to the runtime slow path (see bottom).
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      // Sets NE (oop is presumably non-null here) so the slow path runs.
      __ cmp(oop, zr);
      return;
    }

    if (UseBiasedLocking) {
      __ biased_locking_enter(disp_hdr, oop, box, tmp, true, cont);
    }

    // Handle existing monitor
    if (EmitSync & 0x02) {
      // we can use AArch64's bit test and branch here but
      // markoopDesc does not define a bit index just the bit value
      // so assert in case the bit pos changes
#     define __monitor_value_log2 1
      assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
      __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#     undef __monitor_value_log2
    }

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // Load Compare Value application register.

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with mark and if equal exchange scratch1
    // with object markOop.
    // Note that this is simply a CAS: it does not generate any
    // barriers.  These are separately generated by
    // membar_acquire_lock().
    {
      // Hand-rolled LL/SC loop: ldxr/stlxr retried until the store
      // succeeds (tmp == 0) or the compare fails.
      Label retry_load;
      __ bind(retry_load);
      __ ldxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, box, oop);
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, will have now locked it will continue at label cont

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      // The object's monitor m is unlocked iff m->owner == NULL,
      // otherwise m->owner may contain a thread or a stack address.
      //
      // Try to CAS m->owner from NULL to current thread.
      __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
      __ mov(disp_hdr, zr);

      {
        // LL/SC loop CASing m->owner from NULL (disp_hdr) to rthread.
        Label retry_load, fail;
        __ bind(retry_load);
        __ ldxr(rscratch1, tmp);
        __ cmp(disp_hdr, rscratch1);
        __ br(Assembler::NE, fail);
        // use stlxr to ensure update is immediately visible
        __ stlxr(rscratch1, rthread, tmp);
        __ cbnzw(rscratch1, retry_load);
        __ bind(fail);
      }

      // Label next;
      // __ cmpxchgptr(/*oldv=*/disp_hdr,
      //               /*newv=*/rthread,
      //               /*addr=*/tmp,
      //               /*tmp=*/rscratch1,
      //               /*succeed*/next,
      //               /*fail*/NULL);
      // __ bind(next);

      // store a non-null value into the box.
      __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

      // PPC port checks the following invariants
      // #ifdef ASSERT
      // bne(flag, cont);
      // We have acquired the monitor, check some invariants.
      // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
      // Invariant 1: _recursions should be 0.
      // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
      // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
      //                        "monitor->_recursions should be 0", -1);
      // Invariant 2: OwnerIsThread shouldn't be 0.
      // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
      //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
      //                           "monitor->OwnerIsThread shouldn't be 0", -1);
      // #endif
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
3761 
3762   // TODO
3763   // reimplement this with custom cmpxchgptr code
3764   // which avoids some of the unnecessary branching
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    // Inline monitor-exit fast path, mirroring aarch64_enc_fast_lock.
    // On exit the condition flags report the outcome: EQ = unlocked,
    // NE = fall through to the runtime slow path.
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      __ cmp(oop, zr); // Oop can't be 0 here => always false.
      return;
    }

    if (UseBiasedLocking) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
    }

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      {
        // LL/SC loop: CAS the mark word back from box to the saved
        // displaced header.
        Label retry_load;
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        __ cbzw(tmp, cont);
        __ b(retry_load);
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
      __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
      __ cmp(rscratch1, zr);
      __ br(Assembler::NE, cont);

      __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
      __ cmp(rscratch1, zr);
      __ cbnz(rscratch1, cont);
      // need a release store here
      __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ stlr(rscratch1, tmp); // rscratch1 is zero
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
3854 
3855 %}
3856 
3857 //----------FRAME--------------------------------------------------------------
3858 // Definition of frame structure and management information.
3859 //
3860 //  S T A C K   L A Y O U T    Allocators stack-slot number
3861 //                             |   (to get allocators register number
3862 //  G  Owned by    |        |  v    add OptoReg::stack0())
3863 //  r   CALLER     |        |
3864 //  o     |        +--------+      pad to even-align allocators stack-slot
3865 //  w     V        |  pad0  |        numbers; owned by CALLER
3866 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
3867 //  h     ^        |   in   |  5
3868 //        |        |  args  |  4   Holes in incoming args owned by SELF
3869 //  |     |        |        |  3
3870 //  |     |        +--------+
3871 //  V     |        | old out|      Empty on Intel, window on Sparc
3872 //        |    old |preserve|      Must be even aligned.
3873 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
3874 //        |        |   in   |  3   area for Intel ret address
3875 //     Owned by    |preserve|      Empty on Sparc.
3876 //       SELF      +--------+
3877 //        |        |  pad2  |  2   pad to align old SP
3878 //        |        +--------+  1
3879 //        |        | locks  |  0
3880 //        |        +--------+----> OptoReg::stack0(), even aligned
3881 //        |        |  pad1  | 11   pad to align new SP
3882 //        |        +--------+
3883 //        |        |        | 10
3884 //        |        | spills |  9   spills
3885 //        V        |        |  8   (pad0 slot for callee)
3886 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
3887 //        ^        |  out   |  7
3888 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
3889 //     Owned by    +--------+
3890 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
3891 //        |    new |preserve|      Must be even-aligned.
3892 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
3893 //        |        |        |
3894 //
3895 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
3896 //         known from SELF's arguments and the Java calling convention.
3897 //         Region 6-7 is determined per call site.
// Note 2: If the calling convention leaves holes in the incoming argument
//         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
//         incoming area, as the Java calling convention is completely under
//         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
//         varargs C calling conventions.
3905 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
3906 //         even aligned with pad0 as needed.
3907 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
3908 //           (the latter is true on Intel but is it false on AArch64?)
3909 //         region 6-11 is even aligned; it may be padded out more so that
3910 //         the region from SP to FP meets the minimum stack alignment.
3911 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3912 //         alignment.  Region 11, pad1, may be dynamically extended so that
3913 //         SP meets the minimum alignment.
3914 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              round_to((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between incoming/outgoing, just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low-half return register for each ideal register type.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High-half return register (OptoReg::Bad for 32-bit types).
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                       // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
4018 
4019 //----------ATTRIBUTES---------------------------------------------------------
4020 //----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
                                // (INSN_COST is defined elsewhere in
                                // this file, outside this excerpt)
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
4036 
4037 //----------OPERANDS-----------------------------------------------------------
4038 // Operand definitions must precede instruction definitions for correct parsing
4039 // in the ADLC because operands constitute user defined types which are used in
4040 // instruction definitions.
4041 
4042 //----------Simple Operands----------------------------------------------------
4043 
4044 // Integer operands 32 bit
4045 // 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant no greater than 4
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 255 (0xFF)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 65535 (0xFFFF)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4197 
// NOTE(review): immL_63 and immL_255 below match ConI and read
// get_int() despite their immL_* names; presumably they are used for
// (int-typed) shift counts of long operations -- confirm before
// changing them to ConL/get_long().
operand immL_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immL_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 65535 (0xFFFF) as a long constant
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 4294967295 (0xFFFFFFFF) as a long constant
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long constant of the form 2^k - 1 (contiguous low-order ones) with
// the top two bits clear.
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Int constant of the form 2^k - 1 with the top two bits clear.
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4259 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset (long flavour of immIU12)
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate loads and stores (long)
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4334 
// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer operands 64 bit
// 64 bit immediate (any value)
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit offset of pc in thread anchor
// (matches exactly frame_anchor_offset + last_Java_pc_offset)
operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask (0xFFFFFFFF)
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4443 
// Pointer operands
// Pointer Immediate (unconstrained)
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
// (matches only the address of the VM's safepoint polling page)
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate((jbyte*)n->get_ptr() ==
        ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// NOTE(review): original comment was identical to immP_M1's ("write the
// current PC to the thread anchor") — presumably a distinct sentinel value;
// confirm actual use at the match rules that consume this operand.
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4525 
// Float and Double operands
// Double Immediate (unconstrained)
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
// (bit-pattern comparison, so -0.0d does NOT match)
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double constant accepted by Assembler::operand_valid_for_float_immediate,
// i.e. encodable as an FP "packed" immediate (FMOV-style encoding).
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate (unconstrained)
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
// (bit-pattern comparison, so -0.0f does NOT match)
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float constant accepted by Assembler::operand_valid_for_float_immediate
// (widened to double for the check), i.e. FMOV-encodable.
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4617 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4651 
// Integer 64 bit Register not Special
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  // op_cost(0) was missing here; every sibling register operand in this
  // file (iRegINoSp, iRegPNoSp, iRegNNoSp, ...) declares it explicitly,
  // so add it for consistent (zero) operand cost in the matcher.
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4660 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4799 
// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only (header previously said "R2" — copy/paste slip;
// the register class is int_r4_reg)
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4844 
4845 
// Narrow Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer 32 bit Register not Special
// (header previously said "Integer 64 bit" — copy/paste slip; narrow
// oops are 32-bit values in a no_special_reg32 class)
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4878 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register V0 only
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register V1 only
operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register V2 only
operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register V3 only
operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4938 
4939 // Flags register, used as output of signed compare instructions
4940 
// note that on AArch64 we also use this register as the output for
// floating point compare instructions (CmpF CmpD). this ensures
4943 // that ordered inequality tests use GT, GE, LT or LE none of which
4944 // pass through cases where the result is unordered i.e. one or both
4945 // inputs to the compare is a NaN. this means that the ideal code can
4946 // replace e.g. a GT with an LE and not end up capturing the NaN case
4947 // (where the comparison should always fail). EQ and NE tests are
4948 // always generated in ideal code so that unordered folds into the NE
4949 // case, matching the behaviour of AArch64 NE.
4950 //
4951 // This differs from x86 where the outputs of FP compares use a
4952 // special FP flags registers and where compares based on this
4953 // register are distinguished into ordered inequalities (cmpOpUCF) and
4954 // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
4955 // to explicitly handle the unordered case in branches. x86 also has
4956 // to include extra CMoveX rules to accept a cmpOpUCF input.
4957 
// Flags register, used as output of signed compare instructions
// (also FP compares — see the commentary above)
operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
4978 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg (was mislabelled "link_reg")
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5020 
//----------Memory Operands----------------------------------------------------

// Plain register-indirect addressing: [reg]
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// base + (long index << scale) + unsigned 12-bit int offset
operand indIndexScaledOffsetI(iRegP reg, iRegL lreg, immIScale scale, immIU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// base + (long index << scale) + unsigned 12-bit long offset
operand indIndexScaledOffsetL(iRegP reg, iRegL lreg, immIScale scale, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// base + sign-extended int index + unsigned 12-bit long offset
operand indIndexOffsetI2L(iRegP reg, iRegI ireg, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (ConvI2L ireg)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg, $off I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp($off);
  %}
%}

// base + (sign-extended int index << scale) + unsigned 12-bit long offset
operand indIndexScaledOffsetI2L(iRegP reg, iRegI ireg, immIScale scale, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg sxtw($scale), $off I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}

// base + (sign-extended int index << scale), no offset
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// base + (long index << scale), no offset
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// base + long index, no scale, no offset
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// base + immediate int offset (offset validated by immIOffset)
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// base + immediate long offset (offset validated by immLoffset)
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5162 
5163 
// Narrow-oop variants of the memory operands above. All require
// Universe::narrow_oop_shift() == 0 so that DecodeN is a no-op shift.

operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// NOTE(review): op_cost(0) here vs op_cost(INSN_COST) in the otherwise
// parallel indIndexScaledOffsetLN below — confirm the asymmetry is intended.
operand indIndexScaledOffsetIN(iRegN reg, iRegL lreg, immIScale scale, immIU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

operand indIndexScaledOffsetLN(iRegN reg, iRegL lreg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

operand indIndexOffsetI2LN(iRegN reg, iRegI ireg, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (ConvI2L ireg)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg, $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp($off);
  %}
%}

operand indIndexScaledOffsetI2LN(iRegN reg, iRegI ireg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg sxtw($scale), $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}

operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5313 
5314 
5315 
// AArch64 opto stubs need to write to the pc slot in the thread anchor
// (base is the thread register; offset is pinned by immL_pc_off to the
// last_Java_pc slot)
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5330 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
// NOTE(review): the "RSP" comments below are x86-style holdovers; 0x1e is
// presumably the encoding of the AArch64 stack pointer in this file's
// reg_def order — confirm against the register definition block.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
5405 
5406 // Operands for expressing Control Flow
5407 // NOTE: Label is a predefined operand which should not be redefined in
5408 //       the AD file. It is generically handled within the ADLC.
5409 
5410 //----------Conditional Branch Operands----------------------------------------
5411 // Comparison Op  - This is the operation of the comparison, and is limited to
5412 //                  the following set of codes:
5413 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5414 //
5415 // Other attributes of the comparison, such as unsignedness, are specified
5416 // by the comparison instruction that sets a condition code flags register.
5417 // That result is represented by a flags operand whose subtype is appropriate
5418 // to the unsignedness (etc.) of the comparison.
5419 //
5420 // Later, the instruction which matches both the Comparison Op (a Bool) and
5421 // the flags (produced by the Cmp) specifies the coding of the comparison op
5422 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5423 
// used for signed integral comparisons and fp comparisons

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// Special operand allowing long args to int ops to be truncated for free

// NOTE(review): no trailing ';' after interface(REG_INTER) below, unlike
// every other operand in this file — ADLC appears to accept it, but
// confirm it is intentional.
operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}
5474 
5475 
5476 //----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
5478 // instruction definitions by not requiring the AD writer to specify
5479 // separate instructions for every form of operand when the
5480 // instruction accepts multiple operand types with the same basic
5481 // encoding and format. The classic case of this is memory operands.
5482 
5483 // memory is used to define read/write location for load/store
5484 // instruction defs. we can turn a memory op into an Address
5485 
// "memory" groups every addressing-mode operand (plain and narrow-oop
// variants) so load/store rules can accept any of them.
opclass memory(indirect, indIndexScaledOffsetI, indIndexScaledOffsetL, indIndexOffsetI2L, indIndexScaledOffsetI2L, indIndexScaled, indIndexScaledI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledOffsetIN, indIndexScaledOffsetLN, indIndexOffsetI2LN, indIndexScaledOffsetI2LN, indIndexScaledN, indIndexScaledI2LN, indIndexN, indOffIN, indOffLN);


// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but its not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
5504 
5505 //----------PIPELINE-----------------------------------------------------------
5506 // Rules which define the behavior of the target architectures pipeline.
5507 // Integer ALU reg operation
5508 pipeline %{
5509 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
5522 
5523 // We don't use an actual pipeline model so don't care about resources
5524 // or description. we do use pipeline classes to introduce fixed
5525 // latencies
5526 
5527 //----------RESOURCES----------------------------------------------------------
5528 // Resources are the functional units available to the machine
5529 
// Two issue slots (INS0/INS1), two ALUs, plus multiply-accumulate,
// divide, branch, load/store and NEON/FP units.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Issue, two execute stages, writeback.
pipe_desc(ISS, EX1, EX2, WR);
5542 
5543 //----------PIPELINE CLASSES---------------------------------------------------
5544 // Pipeline Classes describe the stages in which input and output are
5545 // referenced by the hardware pipeline.
5546 
5547 //------- Integer ALU operations --------------------------
5548 
// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
// NOTE(review): the comment says result in EX2 and dst is written in EX2,
// but the ALU resource is claimed at EX1 — confirm this mismatch is
// intentional.
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
5634 
5635 // Integer ALU immediate operation (no source operands)
5636 // Eg.  MOV     x0, #N
5637 pipe_class ialu_imm(iRegI dst)
5638 %{
5639   single_instruction;
5640   dst    : EX1(write);
5641   INS01  : ISS;
5642   ALU    : EX1;
5643 %}
5644 
//------- Compare operation -------------------------------

// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-imm
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
5671 
//------- Conditional instructions ------------------------

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// EG.  CSEL    X0, X1, ZR, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
5709 
//------- Multiply pipeline operations --------------------

// Multiply reg-reg
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long (64 bit) multiply reg-reg
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long (64 bit) multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
5762 
//------- Divide pipeline operations --------------------

// Divide reg-reg (32 bit)
// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// Divide reg-reg (64 bit)
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
5788 
//------- Load pipeline operations ------------------------
// Load results become available at the WR stage.

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
5822 
//------- Store pipeline operations -----------------------

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
// NOTE(review): 'dst' here names the address register (read at ISS), not a result.
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
5856 
//------- Branch pipeline operations ----------------------

// Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
5885 
//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
// Modelled as ~10 instructions spread over multiple bundles.
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
5909 
// Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0); // zero latency: used for pseudo-ops such as MachNop
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node.
// MachNop (listed in nops() above) uses the zero-latency empty class.
define %{
   MachNop = pipe_class_empty;
%}
5949 
5950 %}
5951 //----------INSTRUCTIONS-------------------------------------------------------
5952 //
5953 // match      -- States which machine-independent subtree may be replaced
5954 //               by this instruction.
5955 // ins_cost   -- The estimated cost of this instruction is used by instruction
5956 //               selection to identify a minimum cost tree of machine
5957 //               instructions that matches a tree of machine-independent
5958 //               instructions.
5959 // format     -- A string providing the disassembly for this instruction.
5960 //               The value of an instruction's operand may be inserted
5961 //               by referring to it with a '$' prefix.
5962 // opcode     -- Three instruction opcodes may be provided.  These are referred
5963 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
5965 //               indicate the type of machine instruction, while secondary
5966 //               and tertiary are often used for prefix options or addressing
5967 //               modes.
5968 // ins_encode -- A list of encode classes with parameters. The encode class
5969 //               name must have been defined in an 'enc_class' specification
5970 //               in the encode section of the architecture description.
5971 
5972 // ============================================================================
5973 // Memory (Load/Store) Instructions
5974 
5975 // Load Instructions
5976 
// Load Byte (8 bit signed)
// The !needs_acquiring_load(n) predicates below exclude volatile loads,
// which are matched by the ldar* rules in the volatile section further down.
instruct loadB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
// (the predicate looks through the ConvI2L at the underlying load node)
instruct loadB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6102 
// Load Integer (32 bit signed) into long
instruct loadI2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
// (mask is presumably 0xffffffff -- see the immL_32bits operand -- so a
// zero-extending 32-bit ldrw suffices; TODO confirm against operand defn)
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Long (64 bit signed)
// NOTE(review): the format string says "# int" though this is a 64-bit load.
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Range
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6157 
// Load Pointer
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Float
// (FP loads use the generic memory pipeline class rather than iload_reg_mem)
instruct loadF(vRegF dst, memory mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
6241 
6242 
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant

instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant One
// NOTE(review): format string says "NULL ptr" but this loads the constant 1.

instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Poll Page Constant

instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}

  ins_encode(aarch64_enc_mov_poll_page(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Byte Map Base Constant

instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}
6340 
// Load Narrow Pointer Constant

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Packed Float Constant
// (presumably a float immediate encodable directly in fmov -- see immFPacked)
// NOTE(review): cost is INSN_COST * 4 here but INSN_COST for loadConD_packed -- confirm.

instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(pipe_class_default);
%}

// Load Float Constant (general case: fetched from the constant table)

instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(pipe_class_default);
%}

// Load Packed Double Constant

instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(pipe_class_default);
%}

// Load Double Constant (general case: fetched from the constant table)
// NOTE(review): format string says "float=" though the constant is a double.

instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(pipe_class_default);
%}
6443 
// Store Instructions
// The !needs_releasing_store(n) predicates exclude volatile stores,
// which presumably use stlr forms matched elsewhere -- TODO confirm.

// Store CMS card-mark Immediate
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store Byte
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}


// Store Byte (zero immediate): stores the zero register
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store Char/Short
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Char/Short (zero immediate)
instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}

// Store Integer

instruct storeI(iRegIorL2I src, memory mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
6528 
// Store Integer (zero immediate)
instruct storeimmI0(immI0 zero, memory mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}

// Store Long (64 bit signed)
// NOTE(review): the format strings below say "# int" though these are 64-bit stores.
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Long (64 bit signed), zero immediate
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Pointer
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Pointer (null immediate)
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
6597 
// Store Compressed Pointer
instruct storeN(iRegN src, memory mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Compressed Pointer (null immediate)
// When both narrow bases are NULL, rheapbase holds zero (see the format
// comment), so it can be stored directly instead of materialising a zero.
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Float
instruct storeF(vRegF src, memory mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD(vRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Klass Pointer
// NOTE(review): predicate precedes match here, unlike surrounding rules --
// harmless but inconsistent with the file's style.
instruct storeNKlass(iRegN src, memory mem)
%{
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
6671 
6672 // TODO
6673 // implement storeImmD0 and storeDImmPacked
6674 
// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

instruct prefetchalloc( memory mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );  // write-prefetch (PSTL1KEEP)

  ins_pipe(iload_prefetch);
%}
6688 
//  ---------------- volatile loads and stores ----------------
// These rules carry no acquiring predicate: they are used when the
// !needs_acquiring_load predicates on the plain load rules above fail.

// Load Byte (8 bit signed)
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Char (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
6780 
// Load Short/Char (16 bit signed) into long
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Fix: the disassembly string previously said "ldarh", but the encoding
  // emits the sign-extending ldarsh -- keep the format in sync.
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
6793 
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// matches the (long)x & 0xFFFFFFFF idiom; ldarw already zero-extends
// into the 64 bit register so the AndL mask needs no extra instruction
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
6819 
// Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // format comment previously said "# int"; this is a 64 bit load
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
6832 
// Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
// n.b. ldar only targets general registers, so the encoding loads via
// a scratch register and moves into the FP register (see the
// aarch64_enc_fldars definition -- TODO confirm, not visible here)
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
6884 
// ---- Volatile stores (releasing) ----
// Each rule uses a releasing store (stlr*).

// Store Byte
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}
6910 
// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  // n.b. space added after "mem" for consistency with the sibling
  // volatile store rules ("Set mem (StoreX mem src)")
  match(Set mem (StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
6924 
// Store Long (64 bit signed)
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  // format comment previously said "# int"; this is a 64 bit store
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
6937 
// Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float
// n.b. stlr only takes general registers, so the encoding moves the FP
// value through a scratch register (see aarch64_enc_fstlrs -- TODO
// confirm, encoding not visible in this chunk)
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
6992 
6993 //  ---------------- end of volatile loads and stores ----------------
6994 
6995 // ============================================================================
6996 // BSWAP Instructions
6997 
6998 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
6999   match(Set dst (ReverseBytesI src));
7000 
7001   ins_cost(INSN_COST);
7002   format %{ "revw  $dst, $src" %}
7003 
7004   ins_encode %{
7005     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
7006   %}
7007 
7008   ins_pipe(ialu_reg);
7009 %}
7010 
7011 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
7012   match(Set dst (ReverseBytesL src));
7013 
7014   ins_cost(INSN_COST);
7015   format %{ "rev  $dst, $src" %}
7016 
7017   ins_encode %{
7018     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
7019   %}
7020 
7021   ins_pipe(ialu_reg);
7022 %}
7023 
7024 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
7025   match(Set dst (ReverseBytesUS src));
7026 
7027   ins_cost(INSN_COST);
7028   format %{ "rev16w  $dst, $src" %}
7029 
7030   ins_encode %{
7031     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
7032   %}
7033 
7034   ins_pipe(ialu_reg);
7035 %}
7036 
7037 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
7038   match(Set dst (ReverseBytesS src));
7039 
7040   ins_cost(INSN_COST);
7041   format %{ "rev16w  $dst, $src\n\t"
7042             "sbfmw $dst, $dst, #0, #15" %}
7043 
7044   ins_encode %{
7045     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
7046     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
7047   %}
7048 
7049   ins_pipe(ialu_reg);
7050 %}
7051 
7052 // ============================================================================
7053 // Zero Count Instructions
7054 
7055 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
7056   match(Set dst (CountLeadingZerosI src));
7057 
7058   ins_cost(INSN_COST);
7059   format %{ "clzw  $dst, $src" %}
7060   ins_encode %{
7061     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
7062   %}
7063 
7064   ins_pipe(ialu_reg);
7065 %}
7066 
7067 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
7068   match(Set dst (CountLeadingZerosL src));
7069 
7070   ins_cost(INSN_COST);
7071   format %{ "clz   $dst, $src" %}
7072   ins_encode %{
7073     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
7074   %}
7075 
7076   ins_pipe(ialu_reg);
7077 %}
7078 
7079 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
7080   match(Set dst (CountTrailingZerosI src));
7081 
7082   ins_cost(INSN_COST * 2);
7083   format %{ "rbitw  $dst, $src\n\t"
7084             "clzw   $dst, $dst" %}
7085   ins_encode %{
7086     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
7087     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
7088   %}
7089 
7090   ins_pipe(ialu_reg);
7091 %}
7092 
7093 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
7094   match(Set dst (CountTrailingZerosL src));
7095 
7096   ins_cost(INSN_COST * 2);
7097   format %{ "rbit   $dst, $src\n\t"
7098             "clz    $dst, $dst" %}
7099   ins_encode %{
7100     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
7101     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
7102   %}
7103 
7104   ins_pipe(ialu_reg);
7105 %}
7106 
7107 // ============================================================================
7108 // MemBar Instruction
7109 
7110 instruct load_fence() %{
7111   match(LoadFence);
7112   ins_cost(VOLATILE_REF_COST);
7113 
7114   format %{ "load_fence" %}
7115 
7116   ins_encode %{
7117     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
7118   %}
7119   ins_pipe(pipe_serial);
7120 %}
7121 
7122 instruct unnecessary_membar_acquire() %{
7123   predicate(unnecessary_acquire(n));
7124   match(MemBarAcquire);
7125   ins_cost(0);
7126 
7127   format %{ "membar_acquire (elided)" %}
7128 
7129   ins_encode %{
7130     __ block_comment("membar_acquire (elided)");
7131   %}
7132 
7133   ins_pipe(pipe_class_empty);
7134 %}
7135 
7136 instruct membar_acquire() %{
7137   match(MemBarAcquire);
7138   ins_cost(VOLATILE_REF_COST);
7139 
7140   format %{ "membar_acquire" %}
7141 
7142   ins_encode %{
7143     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
7144   %}
7145 
7146   ins_pipe(pipe_serial);
7147 %}
7148 
7149 
7150 instruct membar_acquire_lock() %{
7151   match(MemBarAcquireLock);
7152   ins_cost(VOLATILE_REF_COST);
7153 
7154   format %{ "membar_acquire_lock" %}
7155 
7156   ins_encode %{
7157     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
7158   %}
7159 
7160   ins_pipe(pipe_serial);
7161 %}
7162 
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Elide the release barrier when the following volatile store uses a
// releasing stlr (decided by the unnecessary_release predicate)
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}

instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}

  ins_pipe(pipe_serial);
%}

// Elide the trailing StoreLoad barrier when the surrounding ldar/stlr
// sequence already provides the required ordering
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// n.b. deliberately costed very high so the elided variant above is
// always preferred when its predicate holds
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile" %}

  ins_encode %{
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
7251 
7252 // ============================================================================
7253 // Cast/Convert Instructions
7254 
7255 instruct castX2P(iRegPNoSp dst, iRegL src) %{
7256   match(Set dst (CastX2P src));
7257 
7258   ins_cost(INSN_COST);
7259   format %{ "mov $dst, $src\t# long -> ptr" %}
7260 
7261   ins_encode %{
7262     if ($dst$$reg != $src$$reg) {
7263       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
7264     }
7265   %}
7266 
7267   ins_pipe(ialu_reg);
7268 %}
7269 
7270 instruct castP2X(iRegLNoSp dst, iRegP src) %{
7271   match(Set dst (CastP2X src));
7272 
7273   ins_cost(INSN_COST);
7274   format %{ "mov $dst, $src\t# ptr -> long" %}
7275 
7276   ins_encode %{
7277     if ($dst$$reg != $src$$reg) {
7278       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
7279     }
7280   %}
7281 
7282   ins_pipe(ialu_reg);
7283 %}
7284 
7285 // Convert oop into int for vectors alignment masking
7286 instruct convP2I(iRegINoSp dst, iRegP src) %{
7287   match(Set dst (ConvL2I (CastP2X src)));
7288 
7289   ins_cost(INSN_COST);
7290   format %{ "movw $dst, $src\t# ptr -> int" %}
7291   ins_encode %{
7292     __ movw($dst$$Register, $src$$Register);
7293   %}
7294 
7295   ins_pipe(ialu_reg);
7296 %}
7297 
// Convert compressed oop into int for vectors alignment masking
// in case of 32bit oops (heap < 4Gb).
// n.b. only valid with a zero narrow-oop shift, when the compressed
// bits equal the low 32 address bits.
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // format fixed: was "mov dst, $src" -- missing the '$' on dst and
  // naming mov although movw is emitted
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
7313 
7314 
// Convert oop pointer into compressed form
// encode_heap_oop may need a null check, so flags are clobbered
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// NOTE(review): cr is declared but there is no effect(KILL cr) here --
// confirm whether encode_heap_oop_not_null preserves flags and the
// operand can be dropped
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// NOTE(review): cr is declared but unused (no effect clause) -- confirm
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
7368 
// n.b. AArch64 implementations of encode_klass_not_null and
// decode_klass_not_null do not modify the flags register so, unlike
// Intel, we don't kill CR as a side effect here

instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    // the single-register overload decodes in place when src == dst
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
7406 
// The cast nodes below are compile-time type assertions only; they
// emit no code (size(0), empty encoding).
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
7437 
7438 // ============================================================================
7439 // Atomic operation instructions
7440 //
7441 // Intel and SPARC both implement Ideal Node LoadPLocked and
7442 // Store{PIL}Conditional instructions using a normal load for the
7443 // LoadPLocked and a CAS for the Store{PIL}Conditional.
7444 //
7445 // The ideal code appears only to use LoadPLocked/StorePLocked as a
7446 // pair to lock object allocations from Eden space when not using
7447 // TLABs.
7448 //
7449 // There does not appear to be a Load{IL}Locked Ideal Node and the
7450 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
7451 // and to use StoreIConditional only for 32-bit and StoreLConditional
7452 // only for 64-bit.
7453 //
7454 // We implement LoadPLocked and StorePLocked instructions using,
7455 // respectively the AArch64 hw load-exclusive and store-conditional
7456 // instructions. Whereas we must implement each of
7457 // Store{IL}Conditional using a CAS which employs a pair of
7458 // instructions comprising a load-exclusive followed by a
7459 // store-conditional.
7460 
7461 
7462 // Locked-load (linked load) of the current heap-top
7463 // used when updating the eden heap top
7464 // implemented using ldaxr on AArch64
7465 
7466 instruct loadPLocked(iRegPNoSp dst, indirect mem)
7467 %{
7468   match(Set dst (LoadPLocked mem));
7469 
7470   ins_cost(VOLATILE_REF_COST);
7471 
7472   format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}
7473 
7474   ins_encode(aarch64_enc_ldaxr(dst, mem));
7475 
7476   ins_pipe(pipe_serial);
7477 %}
7478 
// Conditional-store of the updated heap-top.
// Used during allocation of the shared heap.
// Sets flag (EQ) on success.
// implemented using stlxr on AArch64.

// n.b. oldval is not used by the encoding: the exclusive monitor set
// up by the paired loadPLocked supplies the "expected" state
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
7503 
// this has to be implemented as a CAS
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}

// this has to be implemented as a CAS
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
7537 
// XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
// can't match them

// CAS returning a 0/1 success value in res; flags are clobbered by the
// cmpxchg/cset sequence
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
7608 
7609 
7610 instruct get_and_setI(indirect mem, iRegINoSp newv, iRegI prev) %{
7611   match(Set prev (GetAndSetI mem newv));
7612   format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
7613   ins_encode %{
7614     __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
7615   %}
7616   ins_pipe(pipe_serial);
7617 %}
7618 
7619 instruct get_and_setL(indirect mem, iRegLNoSp newv, iRegL prev) %{
7620   match(Set prev (GetAndSetL mem newv));
7621   format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
7622   ins_encode %{
7623     __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
7624   %}
7625   ins_pipe(pipe_serial);
7626 %}
7627 
7628 instruct get_and_setN(indirect mem, iRegNNoSp newv, iRegI prev) %{
7629   match(Set prev (GetAndSetN mem newv));
7630   format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
7631   ins_encode %{
7632     __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
7633   %}
7634   ins_pipe(pipe_serial);
7635 %}
7636 
7637 instruct get_and_setP(indirect mem, iRegPNoSp newv, iRegP prev) %{
7638   match(Set prev (GetAndSetP mem newv));
7639   format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
7640   ins_encode %{
7641     __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
7642   %}
7643   ins_pipe(pipe_serial);
7644 %}
7645 
7646 
7647 instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
7648   match(Set newval (GetAndAddL mem incr));
7649   ins_cost(INSN_COST * 10);
7650   format %{ "get_and_addL $newval, [$mem], $incr" %}
7651   ins_encode %{
7652     __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
7653   %}
7654   ins_pipe(pipe_serial);
7655 %}
7656 
7657 instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
7658   predicate(n->as_LoadStore()->result_not_used());
7659   match(Set dummy (GetAndAddL mem incr));
7660   ins_cost(INSN_COST * 9);
7661   format %{ "get_and_addL [$mem], $incr" %}
7662   ins_encode %{
7663     __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
7664   %}
7665   ins_pipe(pipe_serial);
7666 %}
7667 
7668 instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
7669   match(Set newval (GetAndAddL mem incr));
7670   ins_cost(INSN_COST * 10);
7671   format %{ "get_and_addL $newval, [$mem], $incr" %}
7672   ins_encode %{
7673     __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
7674   %}
7675   ins_pipe(pipe_serial);
7676 %}
7677 
7678 instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
7679   predicate(n->as_LoadStore()->result_not_used());
7680   match(Set dummy (GetAndAddL mem incr));
7681   ins_cost(INSN_COST * 9);
7682   format %{ "get_and_addL [$mem], $incr" %}
7683   ins_encode %{
7684     __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
7685   %}
7686   ins_pipe(pipe_serial);
7687 %}
7688 
7689 instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
7690   match(Set newval (GetAndAddI mem incr));
7691   ins_cost(INSN_COST * 10);
7692   format %{ "get_and_addI $newval, [$mem], $incr" %}
7693   ins_encode %{
7694     __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
7695   %}
7696   ins_pipe(pipe_serial);
7697 %}
7698 
7699 instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
7700   predicate(n->as_LoadStore()->result_not_used());
7701   match(Set dummy (GetAndAddI mem incr));
7702   ins_cost(INSN_COST * 9);
7703   format %{ "get_and_addI [$mem], $incr" %}
7704   ins_encode %{
7705     __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
7706   %}
7707   ins_pipe(pipe_serial);
7708 %}
7709 
7710 instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
7711   match(Set newval (GetAndAddI mem incr));
7712   ins_cost(INSN_COST * 10);
7713   format %{ "get_and_addI $newval, [$mem], $incr" %}
7714   ins_encode %{
7715     __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
7716   %}
7717   ins_pipe(pipe_serial);
7718 %}
7719 
7720 instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
7721   predicate(n->as_LoadStore()->result_not_used());
7722   match(Set dummy (GetAndAddI mem incr));
7723   ins_cost(INSN_COST * 9);
7724   format %{ "get_and_addI [$mem], $incr" %}
7725   ins_encode %{
7726     __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
7727   %}
7728   ins_pipe(pipe_serial);
7729 %}
7730 
// Manifest a CmpL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// csetw sets dst to 1 when the compare is not equal; cnegw then negates
// it when the compare was signed-less-than, giving -1/0/1.
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  // format %{ "CmpL3 $dst, $src1, $src2" %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
7753 
// Manifest a CmpL result in an integer register, immediate operand.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    int32_t con = (int32_t)$src2$$constant;
    // a negative constant cannot be used as a subs immediate, so flip
    // the operation: src1 - con == src1 + (-con)
    // (immLAddSub guarantees con is a valid add/sub immediate, so -con
    // cannot overflow)
    if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
7778 
7779 // ============================================================================
7780 // Conditional Move Instructions
7781 
// n.b. we have identical rules for both a signed compare op (cmpOp)
// and an unsigned compare op (cmpOpU). it would be nice if we could
// define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
7791 
// Conditional select: dst = cmp ? src2 : src1 (note operand order --
// csel picks src2 when the condition holds)
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
7823 
7824 // special cases where one arg is zero
7825 
7826 // n.b. this is selected in preference to the rule above because it
7827 // avoids loading constant 0 into a source register
7828 
7829 // TODO
7830 // we ought only to be able to cull one of these variants as the ideal
7831 // transforms ought always to order the zero consistently (to left/right?)
7832 
// CMoveI with a zero first value: use zr instead of loading 0 into a
// source register (signed-compare flags).
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
7848 
// Unsigned-compare flavour of cmovI_zero_reg.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
7864 
// CMoveI with a zero second value: zr supplied as the selected-on-true
// operand (signed-compare flags).
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
7880 
// Unsigned-compare flavour of cmovI_reg_zero.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
7896 
7897 // special case for creating a boolean 0 or 1
7898 
7899 // n.b. this is selected in preference to the rule above because it
7900 // avoids loading constants 0 and 1 into a source register
7901 
// Materialize a boolean 0/1 from the flags with a single csincw
// (dst = zr or zr+1 depending on $cmp) — no constants loaded.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
7920 
// Unsigned-compare flavour of cmovI_reg_zero_one.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
7939 
// CMoveL, both values in registers, signed-compare flags (64-bit csel).
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
7955 
// Unsigned-compare flavour of cmovL_reg_reg.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
7971 
7972 // special cases where one arg is zero
7973 
// CMoveL with zero second value: zr as the selected-on-true operand.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
7989 
// Unsigned-compare flavour of cmovL_reg_zero.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8005 
// CMoveL with zero first value: zr as the selected-on-false operand.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8021 
// Unsigned-compare flavour of cmovL_zero_reg.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8037 
// CMoveP (pointer), both values in registers, signed-compare flags.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8053 
// Unsigned-compare flavour of cmovP_reg_reg.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8069 
8070 // special cases where one arg is zero
8071 
// CMoveP with null (zero) second value: zr instead of a loaded constant.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8087 
// Unsigned-compare flavour of cmovP_reg_zero.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8103 
// CMoveP with null (zero) first value.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8119 
// Unsigned-compare flavour of cmovP_zero_reg.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8135 
// CMoveN (compressed pointer, 32-bit) — cselw, signed-compare flags.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8151 
// Unsigned-compare flavour of cmovN_reg_reg.
// Fix: the format string previously said "# signed" although this is
// the cmpOpU (unsigned) variant; disassembly comment now says unsigned.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8167 
8168 // special cases where one arg is zero
8169 
// CMoveN with zero second value: zr avoids loading constant 0.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8185 
// Unsigned-compare flavour of cmovN_reg_zero.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8201 
// CMoveN with zero first value.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8217 
// Unsigned-compare flavour of cmovN_zero_reg.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8233 
// CMoveF: float conditional select via fcsels, signed-compare flags.
// Note the $src2/$src1 operand order in the encoding.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
8251 
// Unsigned-compare flavour of cmovF_reg.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
8269 
// CMoveD: double conditional select via fcseld, signed-compare flags.
// Fix: the format string previously said "cmove float" although this
// rule is for doubles (CMoveD / vRegD / fcseld).
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
8287 
// Unsigned-compare flavour of cmovD_reg.
// Fix: format string previously said "cmove float" for a double rule.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
8305 
8306 // ============================================================================
8307 // Arithmetic Instructions
8308 //
8309 
8310 // Integer Addition
8311 
8312 // TODO
8313 // these currently employ operations which do not set CR and hence are
8314 // not flagged as killing CR but we would like to isolate the cases
8315 // where we want to set flags from those where we don't. need to work
8316 // out how to do that.
8317 
// Int addition, register + register: single addw.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
8332 
// Int addition, register + add/sub-encodable immediate.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
8346 
// Int addition of an immediate to a long narrowed via ConvL2I — the
// 32-bit addw ignores the upper half, so no separate truncation needed.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
8360 
8361 // Pointer Addition
// Pointer + long offset: single 64-bit add.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
8376 
// Pointer + sign-extended int offset: folds the ConvI2L into the add's
// sxtw extend, avoiding a separate extension instruction.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
8391 
// Pointer + shifted long index: folds the LShiftL into the address
// computation via lea with an lsl-scaled register operand.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
8406 
// Pointer + scaled, sign-extended int index: folds both the ConvI2L
// and the shift into one lea with an sxtw-scaled register operand.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
8421 
// Left shift of a sign-extended int: implemented as a single sbfiz
// (signed bitfield insert in zero) instead of sxtw + lsl.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    // width is capped at 32 since only the low 32 source bits are valid
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
8436 
8437 // Pointer Immediate Addition
8438 // n.b. this needs to be more expensive than using an indirect memory
8439 // operand
// Pointer + add/sub-encodable immediate offset.
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
8453 
8454 // Long Addition
// Long addition, register + register.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
8470 
// Long Immediate Addition. No constant pool entries required.
// Long addition, register + add/sub-encodable immediate.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
8485 
8486 // Integer Subtraction
// Int subtraction, register - register.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
8501 
8502 // Immediate Subtraction
// Int subtraction, register - add/sub-encodable immediate.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
8516 
8517 // Long Subtraction
// Long subtraction, register - register.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
8533 
// Long Immediate Subtraction. No constant pool entries required.
// Long subtraction, register - add/sub-encodable immediate.
// Fix: format string was "sub$dst …" — missing space between the
// mnemonic and the destination operand in disassembly output.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
8548 
8549 // Integer Negation (special case for sub)
8550 
// Int negation (0 - src) as a single negw.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8564 
8565 // Long Negation
8566 
// Long negation (0 - src) as a single neg.
// Fix: the source operand was declared as iRegIorL2I, an int-flavoured
// operand class; the input of SubL is a long value, so it must be iRegL
// (as in the sibling long rules above).
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8580 
8581 // Integer Multiply
8582 
// Int multiply.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
8597 
// Widening multiply: long product of two sign-extended ints as a
// single smull, folding both ConvI2L nodes.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
8612 
8613 // Long Multiply
8614 
// Long multiply.
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
8629 
// High 64 bits of a signed 64x64 multiply (MulHiL) via smulh.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2, \t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
8645 
8646 // Combined Integer Multiply & Add/Sub
8647 
// Fused int multiply-add: dst = src3 + src1 * src2, single maddw.
// Fix: format string said "madd" (64-bit) although the encoder emits
// the 32-bit maddw.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
8663 
// Fused int multiply-subtract: dst = src3 - src1 * src2, single msubw.
// Fix: format string said "msub" (64-bit) although the encoder emits
// the 32-bit msubw.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
8679 
8680 // Combined Long Multiply & Add/Sub
8681 
// Fused long multiply-add: dst = src3 + src1 * src2, single madd.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
8697 
// Fused long multiply-subtract: dst = src3 - src1 * src2, single msub.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
8713 
8714 // Integer Divide
8715 
// Int division (sdivw); corner cases handled in the encoding class.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
8725 
// Sign-bit extraction: (src >> 31) >>> 31 collapses to a single
// logical shift right by 31 (both shift amounts are immI_31).
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
8735 
// Divide-by-2 rounding idiom: src + (src >>> 31), i.e. add the sign
// bit, folded into one addw with an LSR-shifted second operand.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
8749 
8750 // Long Divide
8751 
// Long division (sdiv); corner cases handled in the encoding class.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
8761 
// Long sign-bit extraction: (src >> 63) >>> 63 collapses to a single
// logical shift right by 63.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
8771 
// Long divide-by-2 rounding idiom: src + (src >>> 63), folded into one
// add with an LSR-shifted second operand.
// Fix: the format string omitted the LSR shift that the encoding
// applies (cf. the int variant div2Round above).
instruct div2RoundL(iRegLNoSp dst, iRegL src, immL_63 div1, immL_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "add $dst, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
8785 
8786 // Integer Remainder
8787 
// Int remainder: sdivw + msubw sequence (see aarch64_enc_modw).
// Fix: the second format line read "msubw($dst, …" with a stray
// parenthesis — C-call leftovers — instead of assembly syntax.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
8798 
8799 // Long Remainder
8800 
// Long remainder: sdiv + msub sequence (see aarch64_enc_mod).
// Fix: the second format line read "msub($dst, …" with a stray
// parenthesis; also align the "\n\t" continuation with sibling modI.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
8811 
8812 // Integer Shifts
8813 
8814 // Shift Left Register
// Int shift left by a register amount (lslvw).
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
8829 
8830 // Shift Left Immediate
// Int shift left by an immediate; amount masked to 0..31 per Java
// shift semantics.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
8845 
8846 // Shift Right Logical Register
// Int logical shift right by a register amount (lsrvw).
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
8861 
8862 // Shift Right Logical Immediate
// Int logical shift right by an immediate; amount masked to 0..31.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
8877 
8878 // Shift Right Arithmetic Register
8879 instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
8880   match(Set dst (RShiftI src1 src2));
8881 
8882   ins_cost(INSN_COST * 2);
8883   format %{ "asrvw  $dst, $src1, $src2" %}
8884 
8885   ins_encode %{
8886     __ asrvw(as_Register($dst$$reg),
8887              as_Register($src1$$reg),
8888              as_Register($src2$$reg));
8889   %}
8890 
8891   ins_pipe(ialu_reg_reg_vshift);
8892 %}
8893 
8894 // Shift Right Arithmetic Immediate
8895 instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
8896   match(Set dst (RShiftI src1 src2));
8897 
8898   ins_cost(INSN_COST);
8899   format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}
8900 
8901   ins_encode %{
8902     __ asrw(as_Register($dst$$reg),
8903             as_Register($src1$$reg),
8904             $src2$$constant & 0x1f);
8905   %}
8906 
8907   ins_pipe(ialu_reg_shift);
8908 %}
8909 
8910 // Combined Int Mask and Right Shift (using UBFM)
8911 // TODO
8912 
8913 // Long Shifts
8914 
8915 // Shift Left Register
// NOTE(review): Java long shifts use the count mod 64; the immediate
// variants below mask the constant with 0x3f, matching the 64-bit
// register width.
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
// long: dst = src1 << (src2 & 0x3f)
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
// long: dst = src1 >>> src2
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
// long: dst = src1 >>> (src2 & 0x3f)
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// A special-case pattern for card table stores.
// Same as urShiftL_reg_imm but with the pointer-to-integer cast
// (CastP2X) folded into the match rule, so the shift applies directly
// to the pointer bits.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
// long: dst = src1 >> src2
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
// long: dst = src1 >> (src2 & 0x3f)
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9026 
9027 // BEGIN This section of the file is automatically generated. Do not edit --------------
9028 
// NOTE(review): these rules are inside the machine generated section;
// any comment or change here should also be folded back into the
// generator source so it survives regeneration.

// long: dst = ~src1 (matched as src1 ^ -1), encoded as
// eon(src1, zr): src1 ^ ~0.
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
// int: dst = ~src1 (matched as src1 ^ -1), encoded as eonw(src1, zr).
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}

// int: dst = src1 & ~src2, fused into a single bicw.
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// long: dst = src1 & ~src2, fused into a single bic.
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// int: dst = src1 | ~src2, fused into a single ornw.
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// long: dst = src1 | ~src2, fused into a single orn.
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// int: dst = ~(src1 ^ src2), matched as -1 ^ (src2 ^ src1),
// fused into a single eonw.
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// long: dst = ~(src1 ^ src2), matched as -1 ^ (src2 ^ src1),
// fused into a single eon.
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
9163 
// The rules below fuse a logical operation with a negated, shifted
// operand into a single bic/eon/orn with a shifted-register operand.
// FIX(review): the shift constant is now masked to the operand width
// -- 0x1f for the 32-bit (w) forms, 0x3f for the 64-bit forms -- as
// the hand-written shift rules above already do.  The previous 0x3f
// mask on the w-forms allowed counts 32-63, which are outside the
// architectural 0-31 range of a 32-bit shifted-register operand and
// do not match Java's mod-32 int-shift semantics.  This section is
// machine generated: apply the same fix to the generator source.

// int: dst = src1 & ~(src2 >>> src3)
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: dst = src1 & ~(src2 >>> src3)
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: dst = src1 & ~(src2 >> src3)
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: dst = src1 & ~(src2 >> src3)
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: dst = src1 & ~(src2 << src3)
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: dst = src1 & ~(src2 << src3)
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: dst = ~(src1 ^ (src2 >>> src3))
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: dst = ~(src1 ^ (src2 >>> src3))
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: dst = ~(src1 ^ (src2 >> src3))
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: dst = ~(src1 ^ (src2 >> src3))
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: dst = ~(src1 ^ (src2 << src3))
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: dst = ~(src1 ^ (src2 << src3))
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: dst = src1 | ~(src2 >>> src3)
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: dst = src1 | ~(src2 >>> src3)
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: dst = src1 | ~(src2 >> src3)
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: dst = src1 | ~(src2 >> src3)
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: dst = src1 | ~(src2 << src3)
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: dst = src1 | ~(src2 << src3)
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9487 
9488 instruct AndI_reg_URShift_reg(iRegINoSp dst,
9489                          iRegIorL2I src1, iRegIorL2I src2,
9490                          immI src3, rFlagsReg cr) %{
9491   match(Set dst (AndI src1 (URShiftI src2 src3)));
9492 
9493   ins_cost(1.9 * INSN_COST);
9494   format %{ "andw  $dst, $src1, $src2, LSR $src3" %}
9495 
9496   ins_encode %{
9497     __ andw(as_Register($dst$$reg),
9498               as_Register($src1$$reg),
9499               as_Register($src2$$reg),
9500               Assembler::LSR,
9501               $src3$$constant & 0x3f);
9502   %}
9503 
9504   ins_pipe(ialu_reg_reg_shift);
9505 %}
9506 
9507 instruct AndL_reg_URShift_reg(iRegLNoSp dst,
9508                          iRegL src1, iRegL src2,
9509                          immI src3, rFlagsReg cr) %{
9510   match(Set dst (AndL src1 (URShiftL src2 src3)));
9511 
9512   ins_cost(1.9 * INSN_COST);
9513   format %{ "andr  $dst, $src1, $src2, LSR $src3" %}
9514 
9515   ins_encode %{
9516     __ andr(as_Register($dst$$reg),
9517               as_Register($src1$$reg),
9518               as_Register($src2$$reg),
9519               Assembler::LSR,
9520               $src3$$constant & 0x3f);
9521   %}
9522 
9523   ins_pipe(ialu_reg_reg_shift);
9524 %}
9525 
9526 instruct AndI_reg_RShift_reg(iRegINoSp dst,
9527                          iRegIorL2I src1, iRegIorL2I src2,
9528                          immI src3, rFlagsReg cr) %{
9529   match(Set dst (AndI src1 (RShiftI src2 src3)));
9530 
9531   ins_cost(1.9 * INSN_COST);
9532   format %{ "andw  $dst, $src1, $src2, ASR $src3" %}
9533 
9534   ins_encode %{
9535     __ andw(as_Register($dst$$reg),
9536               as_Register($src1$$reg),
9537               as_Register($src2$$reg),
9538               Assembler::ASR,
9539               $src3$$constant & 0x3f);
9540   %}
9541 
9542   ins_pipe(ialu_reg_reg_shift);
9543 %}
9544 
9545 instruct AndL_reg_RShift_reg(iRegLNoSp dst,
9546                          iRegL src1, iRegL src2,
9547                          immI src3, rFlagsReg cr) %{
9548   match(Set dst (AndL src1 (RShiftL src2 src3)));
9549 
9550   ins_cost(1.9 * INSN_COST);
9551   format %{ "andr  $dst, $src1, $src2, ASR $src3" %}
9552 
9553   ins_encode %{
9554     __ andr(as_Register($dst$$reg),
9555               as_Register($src1$$reg),
9556               as_Register($src2$$reg),
9557               Assembler::ASR,
9558               $src3$$constant & 0x3f);
9559   %}
9560 
9561   ins_pipe(ialu_reg_reg_shift);
9562 %}
9563 
9564 instruct AndI_reg_LShift_reg(iRegINoSp dst,
9565                          iRegIorL2I src1, iRegIorL2I src2,
9566                          immI src3, rFlagsReg cr) %{
9567   match(Set dst (AndI src1 (LShiftI src2 src3)));
9568 
9569   ins_cost(1.9 * INSN_COST);
9570   format %{ "andw  $dst, $src1, $src2, LSL $src3" %}
9571 
9572   ins_encode %{
9573     __ andw(as_Register($dst$$reg),
9574               as_Register($src1$$reg),
9575               as_Register($src2$$reg),
9576               Assembler::LSL,
9577               $src3$$constant & 0x3f);
9578   %}
9579 
9580   ins_pipe(ialu_reg_reg_shift);
9581 %}
9582 
9583 instruct AndL_reg_LShift_reg(iRegLNoSp dst,
9584                          iRegL src1, iRegL src2,
9585                          immI src3, rFlagsReg cr) %{
9586   match(Set dst (AndL src1 (LShiftL src2 src3)));
9587 
9588   ins_cost(1.9 * INSN_COST);
9589   format %{ "andr  $dst, $src1, $src2, LSL $src3" %}
9590 
9591   ins_encode %{
9592     __ andr(as_Register($dst$$reg),
9593               as_Register($src1$$reg),
9594               as_Register($src2$$reg),
9595               Assembler::LSL,
9596               $src3$$constant & 0x3f);
9597   %}
9598 
9599   ins_pipe(ialu_reg_reg_shift);
9600 %}
9601 
9602 instruct XorI_reg_URShift_reg(iRegINoSp dst,
9603                          iRegIorL2I src1, iRegIorL2I src2,
9604                          immI src3, rFlagsReg cr) %{
9605   match(Set dst (XorI src1 (URShiftI src2 src3)));
9606 
9607   ins_cost(1.9 * INSN_COST);
9608   format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}
9609 
9610   ins_encode %{
9611     __ eorw(as_Register($dst$$reg),
9612               as_Register($src1$$reg),
9613               as_Register($src2$$reg),
9614               Assembler::LSR,
9615               $src3$$constant & 0x3f);
9616   %}
9617 
9618   ins_pipe(ialu_reg_reg_shift);
9619 %}
9620 
9621 instruct XorL_reg_URShift_reg(iRegLNoSp dst,
9622                          iRegL src1, iRegL src2,
9623                          immI src3, rFlagsReg cr) %{
9624   match(Set dst (XorL src1 (URShiftL src2 src3)));
9625 
9626   ins_cost(1.9 * INSN_COST);
9627   format %{ "eor  $dst, $src1, $src2, LSR $src3" %}
9628 
9629   ins_encode %{
9630     __ eor(as_Register($dst$$reg),
9631               as_Register($src1$$reg),
9632               as_Register($src2$$reg),
9633               Assembler::LSR,
9634               $src3$$constant & 0x3f);
9635   %}
9636 
9637   ins_pipe(ialu_reg_reg_shift);
9638 %}
9639 
9640 instruct XorI_reg_RShift_reg(iRegINoSp dst,
9641                          iRegIorL2I src1, iRegIorL2I src2,
9642                          immI src3, rFlagsReg cr) %{
9643   match(Set dst (XorI src1 (RShiftI src2 src3)));
9644 
9645   ins_cost(1.9 * INSN_COST);
9646   format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}
9647 
9648   ins_encode %{
9649     __ eorw(as_Register($dst$$reg),
9650               as_Register($src1$$reg),
9651               as_Register($src2$$reg),
9652               Assembler::ASR,
9653               $src3$$constant & 0x3f);
9654   %}
9655 
9656   ins_pipe(ialu_reg_reg_shift);
9657 %}
9658 
9659 instruct XorL_reg_RShift_reg(iRegLNoSp dst,
9660                          iRegL src1, iRegL src2,
9661                          immI src3, rFlagsReg cr) %{
9662   match(Set dst (XorL src1 (RShiftL src2 src3)));
9663 
9664   ins_cost(1.9 * INSN_COST);
9665   format %{ "eor  $dst, $src1, $src2, ASR $src3" %}
9666 
9667   ins_encode %{
9668     __ eor(as_Register($dst$$reg),
9669               as_Register($src1$$reg),
9670               as_Register($src2$$reg),
9671               Assembler::ASR,
9672               $src3$$constant & 0x3f);
9673   %}
9674 
9675   ins_pipe(ialu_reg_reg_shift);
9676 %}
9677 
9678 instruct XorI_reg_LShift_reg(iRegINoSp dst,
9679                          iRegIorL2I src1, iRegIorL2I src2,
9680                          immI src3, rFlagsReg cr) %{
9681   match(Set dst (XorI src1 (LShiftI src2 src3)));
9682 
9683   ins_cost(1.9 * INSN_COST);
9684   format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}
9685 
9686   ins_encode %{
9687     __ eorw(as_Register($dst$$reg),
9688               as_Register($src1$$reg),
9689               as_Register($src2$$reg),
9690               Assembler::LSL,
9691               $src3$$constant & 0x3f);
9692   %}
9693 
9694   ins_pipe(ialu_reg_reg_shift);
9695 %}
9696 
9697 instruct XorL_reg_LShift_reg(iRegLNoSp dst,
9698                          iRegL src1, iRegL src2,
9699                          immI src3, rFlagsReg cr) %{
9700   match(Set dst (XorL src1 (LShiftL src2 src3)));
9701 
9702   ins_cost(1.9 * INSN_COST);
9703   format %{ "eor  $dst, $src1, $src2, LSL $src3" %}
9704 
9705   ins_encode %{
9706     __ eor(as_Register($dst$$reg),
9707               as_Register($src1$$reg),
9708               as_Register($src2$$reg),
9709               Assembler::LSL,
9710               $src3$$constant & 0x3f);
9711   %}
9712 
9713   ins_pipe(ialu_reg_reg_shift);
9714 %}
9715 
9716 instruct OrI_reg_URShift_reg(iRegINoSp dst,
9717                          iRegIorL2I src1, iRegIorL2I src2,
9718                          immI src3, rFlagsReg cr) %{
9719   match(Set dst (OrI src1 (URShiftI src2 src3)));
9720 
9721   ins_cost(1.9 * INSN_COST);
9722   format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}
9723 
9724   ins_encode %{
9725     __ orrw(as_Register($dst$$reg),
9726               as_Register($src1$$reg),
9727               as_Register($src2$$reg),
9728               Assembler::LSR,
9729               $src3$$constant & 0x3f);
9730   %}
9731 
9732   ins_pipe(ialu_reg_reg_shift);
9733 %}
9734 
9735 instruct OrL_reg_URShift_reg(iRegLNoSp dst,
9736                          iRegL src1, iRegL src2,
9737                          immI src3, rFlagsReg cr) %{
9738   match(Set dst (OrL src1 (URShiftL src2 src3)));
9739 
9740   ins_cost(1.9 * INSN_COST);
9741   format %{ "orr  $dst, $src1, $src2, LSR $src3" %}
9742 
9743   ins_encode %{
9744     __ orr(as_Register($dst$$reg),
9745               as_Register($src1$$reg),
9746               as_Register($src2$$reg),
9747               Assembler::LSR,
9748               $src3$$constant & 0x3f);
9749   %}
9750 
9751   ins_pipe(ialu_reg_reg_shift);
9752 %}
9753 
9754 instruct OrI_reg_RShift_reg(iRegINoSp dst,
9755                          iRegIorL2I src1, iRegIorL2I src2,
9756                          immI src3, rFlagsReg cr) %{
9757   match(Set dst (OrI src1 (RShiftI src2 src3)));
9758 
9759   ins_cost(1.9 * INSN_COST);
9760   format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}
9761 
9762   ins_encode %{
9763     __ orrw(as_Register($dst$$reg),
9764               as_Register($src1$$reg),
9765               as_Register($src2$$reg),
9766               Assembler::ASR,
9767               $src3$$constant & 0x3f);
9768   %}
9769 
9770   ins_pipe(ialu_reg_reg_shift);
9771 %}
9772 
9773 instruct OrL_reg_RShift_reg(iRegLNoSp dst,
9774                          iRegL src1, iRegL src2,
9775                          immI src3, rFlagsReg cr) %{
9776   match(Set dst (OrL src1 (RShiftL src2 src3)));
9777 
9778   ins_cost(1.9 * INSN_COST);
9779   format %{ "orr  $dst, $src1, $src2, ASR $src3" %}
9780 
9781   ins_encode %{
9782     __ orr(as_Register($dst$$reg),
9783               as_Register($src1$$reg),
9784               as_Register($src2$$reg),
9785               Assembler::ASR,
9786               $src3$$constant & 0x3f);
9787   %}
9788 
9789   ins_pipe(ialu_reg_reg_shift);
9790 %}
9791 
9792 instruct OrI_reg_LShift_reg(iRegINoSp dst,
9793                          iRegIorL2I src1, iRegIorL2I src2,
9794                          immI src3, rFlagsReg cr) %{
9795   match(Set dst (OrI src1 (LShiftI src2 src3)));
9796 
9797   ins_cost(1.9 * INSN_COST);
9798   format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}
9799 
9800   ins_encode %{
9801     __ orrw(as_Register($dst$$reg),
9802               as_Register($src1$$reg),
9803               as_Register($src2$$reg),
9804               Assembler::LSL,
9805               $src3$$constant & 0x3f);
9806   %}
9807 
9808   ins_pipe(ialu_reg_reg_shift);
9809 %}
9810 
9811 instruct OrL_reg_LShift_reg(iRegLNoSp dst,
9812                          iRegL src1, iRegL src2,
9813                          immI src3, rFlagsReg cr) %{
9814   match(Set dst (OrL src1 (LShiftL src2 src3)));
9815 
9816   ins_cost(1.9 * INSN_COST);
9817   format %{ "orr  $dst, $src1, $src2, LSL $src3" %}
9818 
9819   ins_encode %{
9820     __ orr(as_Register($dst$$reg),
9821               as_Register($src1$$reg),
9822               as_Register($src2$$reg),
9823               Assembler::LSL,
9824               $src3$$constant & 0x3f);
9825   %}
9826 
9827   ins_pipe(ialu_reg_reg_shift);
9828 %}
9829 
// Merge a constant logical right-shift into a 32-bit ADD:
// dst = src1 + (src2 >>> src3).
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // 32-bit operation: shift count must be masked to 0..31
              // (0x1f); 0x3f would allow reserved encodings for 32..63.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9848 
// Merge a constant logical right-shift into a 64-bit ADD:
// dst = src1 + (src2 >>> src3).  Shift count masked to 0..63 (64-bit form).
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9867 
// Merge a constant arithmetic right-shift into a 32-bit ADD:
// dst = src1 + (src2 >> src3).
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              // 32-bit operation: mask with 0x1f (0..31), not 0x3f.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9886 
// Merge a constant arithmetic right-shift into a 64-bit ADD:
// dst = src1 + (src2 >> src3).  Shift count masked to 0..63 (64-bit form).
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9905 
// Merge a constant left-shift into a 32-bit ADD: dst = src1 + (src2 << src3).
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              // 32-bit operation: mask with 0x1f (0..31), not 0x3f.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9924 
// Merge a constant left-shift into a 64-bit ADD: dst = src1 + (src2 << src3).
// Shift count masked to 0..63 (64-bit form).
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9943 
// Merge a constant logical right-shift into a 32-bit SUB:
// dst = src1 - (src2 >>> src3).
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // 32-bit operation: mask with 0x1f (0..31), not 0x3f.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9962 
// Merge a constant logical right-shift into a 64-bit SUB:
// dst = src1 - (src2 >>> src3).  Shift count masked to 0..63 (64-bit form).
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9981 
// Merge a constant arithmetic right-shift into a 32-bit SUB:
// dst = src1 - (src2 >> src3).
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              // 32-bit operation: mask with 0x1f (0..31), not 0x3f.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10000 
// Merge a constant arithmetic right-shift into a 64-bit SUB:
// dst = src1 - (src2 >> src3).  Shift count masked to 0..63 (64-bit form).
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10019 
// Merge a constant left-shift into a 32-bit SUB: dst = src1 - (src2 << src3).
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              // 32-bit operation: mask with 0x1f (0..31), not 0x3f.
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10038 
// Merge a constant left-shift into a 64-bit SUB: dst = src1 - (src2 << src3).
// Shift count masked to 0..63 (64-bit form).
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10057 
10058 
10059 
10060 // Shift Left followed by Shift Right.
10061 // This idiom is used by the compiler for the i2b bytecode etc.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    // Fold (src << lshift) >> rshift (arithmetic) into a single SBFM:
    // immr = (rshift - lshift) mod 64, imms = 63 - lshift.
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    // 32-bit analogue of sbfmL: immr = (rshift - lshift) mod 32,
    // imms = 31 - lshift.
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    // Unsigned variant: (src << lshift) >>> rshift folded into UBFM with
    // the same immr/imms computation as sbfmL.
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    // 32-bit unsigned variant: immr = (rshift - lshift) mod 32,
    // imms = 31 - lshift.
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
// Bitfield extract with shift & mask

instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  // NOTE(review): the format string omits $rshift; disassembly-style output
  // shows only dst, src and mask.
  format %{ "ubfxw $dst, $src, $mask" %}
  ins_encode %{
    // width = log2(mask + 1): assumes mask is a contiguous low-order bit
    // pattern (2^k - 1), which the immI_bitmask operand is expected to
    // guarantee -- confirm against the operand definition.
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit bitfield extract: dst = (src >>> rshift) & mask.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $mask" %}
  ins_encode %{
    // Same shift-plus-contiguous-mask folding as ubfxwI, 64-bit form.
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $mask" %}
  ins_encode %{
    // The 64-bit ubfx zero-fills the high bits, so the ConvI2L is free:
    // the extracted (positive) field is already a valid long.
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
10201 
// Rotations

// (src1 << lshift) | (src2 >>> rshift) with lshift + rshift == 64
// is a single EXTR (extract-register) instruction.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  // Shift counts must sum to a multiple of 64.
  // NOTE(review): the raw node constants are summed without first masking
  // each to 0..63 -- assumes C2 has already canonicalized constant shift
  // counts into range; confirm.
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit variant: lshift + rshift must be a multiple of 32.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  // NOTE(review): format prints "extr" but the encoding emits the 32-bit
  // extrw form below.
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
10233 
// Same EXTR folding as extrOrL, but matching an AddL combining node:
// with disjoint bit ranges, (a << l) + (b >>> r) == (a << l) | (b >>> r).
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit variant of extrAddL.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  // NOTE(review): format prints "extr" but the encoding emits extrw.
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
10263 
10264 
// rol expander
//
// Expander-only (no match rule): rotate-left by a variable amount.
// AArch64 has no ROL instruction, so rol(x, s) is computed as
// rorv(x, -s); the negated count goes through rscratch1.

instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    // Negate the shift count, then rotate right by it.
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// rol expander
//
// 32-bit variant of rolL_rReg (rorvw).

instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
10296 
// Match the rotate-left idiom (x << s) | (x >>> (64 - s)) and expand to
// the rolL_rReg expander above.
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom with 0 instead of 64: (0 - s) and (64 - s) denote the same
// rotation because shift counts are taken modulo 64 -- confirm against
// the ideal-graph shift semantics.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}
10314 
// Match the 32-bit rotate-left idiom (x << s) | (x >>> (32 - s)).
// Fixed: the original declared 64-bit operands (iRegLNoSp dst, iRegL src)
// and expanded the 64-bit rolL_rReg for a 32-bit OrI/LShiftI match rule;
// use 32-bit operands and expand the 32-bit rolI_rReg instead.
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
10323 
// Same 32-bit rotate-left idiom written with 0 instead of 32.
// Fixed: use 32-bit operands and expand rolI_rReg (the original used
// 64-bit iRegLNoSp/iRegL operands and the 64-bit rolL_rReg expander).
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
10332 
// ror expander
//
// Expander-only (no match rule): rotate-right by a variable amount,
// a single RORV instruction.

instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander
//
// 32-bit variant of rorL_rReg (rorvw).

instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
10362 
// Match the rotate-right idiom (x >>> s) | (x << (64 - s)) and expand to
// the rorL_rReg expander above.
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom with 0 instead of 64 (shift counts are taken modulo 64 --
// confirm against the ideal-graph shift semantics).
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}
10380 
// Match the 32-bit rotate-right idiom (x >>> s) | (x << (32 - s)).
// Fixed: the original declared 64-bit operands (iRegLNoSp dst, iRegL src)
// and expanded the 64-bit rorL_rReg for a 32-bit OrI/URShiftI match rule;
// use 32-bit operands and expand the 32-bit rorI_rReg instead.
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
10389 
// Same 32-bit rotate-right idiom written with 0 instead of 32.
// Fixed: use 32-bit operands and expand rorI_rReg (the original used
// 64-bit iRegLNoSp/iRegL operands and the 64-bit rorL_rReg expander).
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
10398 
// Add/subtract (extended)

// dst = src1 + sign_extend_32_to_64(src2): the ConvI2L is folded into the
// ADD's sxtw extended-register form.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

// dst = src1 - sign_extend_32_to_64(src2), via SUB with sxtw.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
10426 
10427 
// dst = src1 + sxth(src2): (src2 << 16) >> 16 is a 16-bit sign extension,
// folded into ADD's extended-register form.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + sxtb(src2): (src2 << 24) >> 24 is an 8-bit sign extension.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + uxtb(src2): (src2 << 24) >>> 24 is an 8-bit zero extension.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit: (src2 << 48) >> 48 is sxth.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit: (src2 << 32) >> 32 is sxtw.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit: (src2 << 56) >> 56 is sxtb.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit: (src2 << 56) >>> 56 is uxtb.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
10518 
10519 
// dst = src1 + (src2 & 0xff): the AndI mask is folded into the addw's
// uxtb (zero-extend byte) extended-register form.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (src2 & 0xffff), via addw with uxth (zero-extend half).
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit: dst = src1 + (src2 & 0xffL), via add with uxtb.
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit: dst = src1 + (src2 & 0xffffL), via add with uxth.
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit: dst = src1 + (src2 & 0xffffffffL), via add with uxtw
// (zero-extend word).
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 - (src2 & 0xff), via subw with uxtb.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 - (src2 & 0xffff), via subw with uxth.
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit: dst = src1 - (src2 & 0xffL), via sub with uxtb.
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit: dst = src1 - (src2 & 0xffffL), via sub with uxth.
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit: dst = src1 - (src2 & 0xffffffffL), via sub with uxtw.
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
10649 
10650 // END This section of the file is automatically generated. Do not edit --------------
10651 
10652 // ============================================================================
10653 // Floating Point Arithmetic Instructions
10654 
// Single-precision FP add: dst = src1 + src2 (fadds).
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double-precision FP add: dst = src1 + src2 (faddd).
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Single-precision FP subtract: dst = src1 - src2 (fsubs).
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double-precision FP subtract: dst = src1 - src2 (fsubd).
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Single-precision FP multiply: dst = src1 * src2 (fmuls).
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double-precision FP multiply: dst = src1 * src2 (fmuld).
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
10744 
// We cannot use these fused multiply-add/subtract ops because they
// don't produce the same result as the equivalent separated ops
// (essentially they don't round the intermediate result).  That's a
// shame.  Leaving them here in case we can identify cases where it is
// legitimate to use them.
10750 
10751 
10752 // instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
10753 //   match(Set dst (AddF (MulF src1 src2) src3));
10754 
10755 //   format %{ "fmadds   $dst, $src1, $src2, $src3" %}
10756 
10757 //   ins_encode %{
10758 //     __ fmadds(as_FloatRegister($dst$$reg),
10759 //              as_FloatRegister($src1$$reg),
10760 //              as_FloatRegister($src2$$reg),
10761 //              as_FloatRegister($src3$$reg));
10762 //   %}
10763 
10764 //   ins_pipe(pipe_class_default);
10765 // %}
10766 
10767 // instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
10768 //   match(Set dst (AddD (MulD src1 src2) src3));
10769 
10770 //   format %{ "fmaddd   $dst, $src1, $src2, $src3" %}
10771 
10772 //   ins_encode %{
10773 //     __ fmaddd(as_FloatRegister($dst$$reg),
10774 //              as_FloatRegister($src1$$reg),
10775 //              as_FloatRegister($src2$$reg),
10776 //              as_FloatRegister($src3$$reg));
10777 //   %}
10778 
10779 //   ins_pipe(pipe_class_default);
10780 // %}
10781 
10782 // instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
10783 //   match(Set dst (AddF (MulF (NegF src1) src2) src3));
10784 //   match(Set dst (AddF (NegF (MulF src1 src2)) src3));
10785 
10786 //   format %{ "fmsubs   $dst, $src1, $src2, $src3" %}
10787 
10788 //   ins_encode %{
10789 //     __ fmsubs(as_FloatRegister($dst$$reg),
10790 //               as_FloatRegister($src1$$reg),
10791 //               as_FloatRegister($src2$$reg),
10792 //              as_FloatRegister($src3$$reg));
10793 //   %}
10794 
10795 //   ins_pipe(pipe_class_default);
10796 // %}
10797 
10798 // instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
10799 //   match(Set dst (AddD (MulD (NegD src1) src2) src3));
10800 //   match(Set dst (AddD (NegD (MulD src1 src2)) src3));
10801 
10802 //   format %{ "fmsubd   $dst, $src1, $src2, $src3" %}
10803 
10804 //   ins_encode %{
10805 //     __ fmsubd(as_FloatRegister($dst$$reg),
10806 //               as_FloatRegister($src1$$reg),
10807 //               as_FloatRegister($src2$$reg),
10808 //               as_FloatRegister($src3$$reg));
10809 //   %}
10810 
10811 //   ins_pipe(pipe_class_default);
10812 // %}
10813 
10814 // instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
10815 //   match(Set dst (SubF (MulF (NegF src1) src2) src3));
10816 //   match(Set dst (SubF (NegF (MulF src1 src2)) src3));
10817 
10818 //   format %{ "fnmadds  $dst, $src1, $src2, $src3" %}
10819 
10820 //   ins_encode %{
10821 //     __ fnmadds(as_FloatRegister($dst$$reg),
10822 //                as_FloatRegister($src1$$reg),
10823 //                as_FloatRegister($src2$$reg),
10824 //                as_FloatRegister($src3$$reg));
10825 //   %}
10826 
10827 //   ins_pipe(pipe_class_default);
10828 // %}
10829 
10830 // instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
10831 //   match(Set dst (SubD (MulD (NegD src1) src2) src3));
10832 //   match(Set dst (SubD (NegD (MulD src1 src2)) src3));
10833 
10834 //   format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}
10835 
10836 //   ins_encode %{
10837 //     __ fnmaddd(as_FloatRegister($dst$$reg),
10838 //                as_FloatRegister($src1$$reg),
10839 //                as_FloatRegister($src2$$reg),
10840 //                as_FloatRegister($src3$$reg));
10841 //   %}
10842 
10843 //   ins_pipe(pipe_class_default);
10844 // %}
10845 
10846 // instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
10847 //   match(Set dst (SubF (MulF src1 src2) src3));
10848 
10849 //   format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}
10850 
10851 //   ins_encode %{
10852 //     __ fnmsubs(as_FloatRegister($dst$$reg),
10853 //                as_FloatRegister($src1$$reg),
10854 //                as_FloatRegister($src2$$reg),
10855 //                as_FloatRegister($src3$$reg));
10856 //   %}
10857 
10858 //   ins_pipe(pipe_class_default);
10859 // %}
10860 
10861 // instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
10862 //   match(Set dst (SubD (MulD src1 src2) src3));
10863 
10864 //   format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}
10865 
10866 //   ins_encode %{
10867 //   // n.b. insn name should be fnmsubd
10868 //     __ fnmsub(as_FloatRegister($dst$$reg),
10869 //                as_FloatRegister($src1$$reg),
10870 //                as_FloatRegister($src2$$reg),
10871 //                as_FloatRegister($src3$$reg));
10872 //   %}
10873 
10874 //   ins_pipe(pipe_class_default);
10875 // %}
10876 
10877 
// Float divide: dst = src1 / src2, encoded as fdivs.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double divide: dst = src1 / src2, encoded as fdivd.
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
10907 
// Float negate: dst = -src, encoded as fnegs.
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Fixed: the format previously printed "fneg"; the encoded instruction
  // is fnegs (cf. negD_reg_reg, which prints "fnegd").
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
10921 
// Double negate: dst = -src, encoded as fnegd.
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Float absolute value: dst = |src|, encoded as fabss.
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double absolute value: dst = |src|, encoded as fabsd.
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double square root, encoded as fsqrtd.
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Float square root. The ideal graph expresses float sqrt as
// ConvD2F(SqrtD(ConvF2D src)); this rule collapses that whole
// pattern into a single fsqrts.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
10987 
10988 // ============================================================================
10989 // Logical Instructions
10990 
10991 // Integer Logical Instructions
10992 
10993 // And Instructions
10994 
10995 
// Int bitwise AND of two registers: dst = src1 & src2 (andw).
// NOTE(review): cr is declared but there is no effect() clause and the
// encoding uses the non-flag-setting andw - looks unused; confirm.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
11010 
// Int bitwise AND with a logical immediate: dst = src1 & src2.
// NOTE(review): cr is declared but there is no effect() clause and the
// encoding uses the non-flag-setting andw - looks unused; confirm.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Fixed: the format previously printed the flag-setting "andsw", but
  // the encoding emits the non-flag-setting andw.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
11025 
11026 // Or Instructions
11027 
// Int bitwise OR of two registers: dst = src1 | src2 (orrw).
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Int bitwise OR with a logical immediate: dst = src1 | src2.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// Int bitwise XOR of two registers: dst = src1 ^ src2 (eorw).
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Int bitwise XOR with a logical immediate: dst = src1 ^ src2.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
11089 
11090 // Long Logical Instructions
11091 // TODO
11092 
// Long bitwise AND of two registers: dst = src1 & src2 (andr).
// NOTE(review): cr is declared but there is no effect() clause and the
// encoding uses the non-flag-setting andr - looks unused; confirm.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Fixed: format comment said "# int"; these are long operations.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long bitwise AND with a logical immediate: dst = src1 & src2.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Or Instructions

// Long bitwise OR of two registers: dst = src1 | src2 (orr).
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long bitwise OR with a logical immediate: dst = src1 | src2.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// Long bitwise XOR of two registers: dst = src1 ^ src2 (eor).
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long bitwise XOR with a logical immediate: dst = src1 ^ src2.
// (format/ins_cost order also normalized to match the siblings above.)
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
11186 
// Sign-extend int to long. sbfm with immr=0, imms=31 is the sxtw alias
// printed in the format.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// Zero-extend int to long: (long)src & 0xFFFFFFFF collapses to a single
// ubfm (zero-extract of bits 0..31).
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Truncate long to int via a 32-bit register move.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
11225 
// Int to boolean: dst = (src != 0) ? 1 : 0, via cmpw against zr then
// cset on NE. Clobbers the flags.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// Pointer to boolean: dst = (src != null) ? 1 : 0, 64-bit compare
// against zr then cset on NE. Clobbers the flags.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
11261 
// Narrow double to float (fcvtd).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Widen float to double (fcvts).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Float to int conversion (fcvtzsw: signed, round toward zero).
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Float to long conversion (fcvtzs: signed, round toward zero).
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Int to float conversion (scvtfws: signed 32-bit source).
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Long to float conversion (scvtfs: signed 64-bit source).
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double to int conversion (fcvtzdw: signed, round toward zero).
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double to long conversion (fcvtzd: signed, round toward zero).
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Int to double conversion (scvtfwd: signed 32-bit source).
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Long to double conversion (scvtfd: signed 64-bit source).
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11391 
11392 // stack <-> reg and reg <-> reg shuffles with no conversion
11393 
// Bit-for-bit move of a float stack slot into an int register (ldrw).
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Bit-for-bit move of an int stack slot into a float register (ldrs).
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Bit-for-bit move of a double stack slot into a long register (ldr).
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Bit-for-bit move of a long stack slot into a double register (ldrd).
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Bit-for-bit store of a float register into an int stack slot (strs).
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Bit-for-bit store of an int register into a float stack slot (strw).
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
11501 
// Bit-for-bit store of a double register into a long stack slot (strd).
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Fixed: format operands were reversed ("strd $dst, $src"); the
  // encoding stores $src into stack slot $dst, matching the other
  // reg->stack moves ("strs $src, $dst", "strw $src, $dst", ...).
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
11519 
// Bit-for-bit store of a long register into a double stack slot (str).
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
11537 
// Bit-for-bit move, FP register to int register (fmovs).
// NOTE(review): these reg-to-reg moves use ins_pipe(pipe_class_memory)
// although no memory is touched - confirm this pipe class is intended.
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_memory);

%}

// Bit-for-bit move, int register to FP register (fmovs).
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(pipe_class_memory);

%}

// Bit-for-bit move, FP register to long register (fmovd).
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_memory);

%}

// Bit-for-bit move, long register to FP register (fmovd).
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(pipe_class_memory);

%}
11609 
11610 // ============================================================================
11611 // clearing of an array
11612 
// Zero cnt words starting at base. Operands are pinned to r11/r10 and
// are clobbered by the expansion (USE_KILL); the encoding is the shared
// aarch64_enc_clear_array_reg_reg stub.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode(aarch64_enc_clear_array_reg_reg(cnt, base));

  ins_pipe(pipe_class_memory);
%}
11625 
11626 // ============================================================================
11627 // Overflow Math Instructions
11628 
// Int add overflow check: cmnw sets V on signed overflow of op1 + op2.
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Int add overflow check against an add/sub immediate.
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Long add overflow check: cmn sets V on signed overflow of op1 + op2.
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Long add overflow check against an add/sub immediate.
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Int subtract overflow check: cmpw sets V on signed overflow of op1 - op2.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Int subtract overflow check against an add/sub immediate.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Long subtract overflow check: cmp sets V on signed overflow of op1 - op2.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Long subtract overflow check against an add/sub immediate.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Int negate overflow check: 0 - op1, i.e. cmpw zr, op1.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Long negate overflow check: 0 - op1, i.e. cmp zr, op1.
// NOTE(review): zero is declared immI0 although the operation is
// OverflowSubL (long) - confirm whether immL0 was intended here.
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
11758 
// Int multiply overflow check producing flags. Computes the full 64-bit
// product with smull, then compares it against its own 32->64 sign
// extension (NE iff the product does not fit in 32 bits), and finally
// manufactures the V flag via the 0x80000000 - 1 compare so a later
// branch on VS/VC sees the overflow result.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused int-multiply-overflow-and-branch: when the If consumes the
// overflow test directly we skip manufacturing V and branch on NE/EQ.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

// Long multiply overflow check producing flags. mul gives the low 64
// bits, smulh the high 64; overflow iff the high half is not the sign
// extension of the low half. V is then manufactured as above.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #31\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused long-multiply-overflow-and-branch, analogous to the int form.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #31\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
11848 
11849 // ============================================================================
11850 // Compare Instructions
11851 
// Signed int compare, register-register: sets the normal flags register
// with a 32-bit cmpw.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
11865 
// Signed int compare against the constant zero, using the add/sub-immediate
// compare encoding.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
11879 
// Signed int compare against an immediate that fits the add/sub-immediate
// encoding: single instruction, base cost.
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
11893 
// Signed int compare against a general immediate.  Costed at 2 * INSN_COST
// so the immIAddSub variant above is preferred when the constant fits it.
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
11907 
11908 // Unsigned compare Instructions; really, same as signed compare
11909 // except it should only be used to feed an If or a CMovI which takes a
11910 // cmpOpU.
11911 
// Unsigned int compare, register-register.  Same cmpw encoding as the
// signed variant; only the flags register class (rFlagsRegU) differs so
// consumers interpret the condition codes as unsigned.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
11925 
// Unsigned int compare against zero via the add/sub-immediate encoding.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
11939 
// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
11953 
// Unsigned int compare against a general immediate (may need a constant
// materialization, hence 2 * INSN_COST).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
11967 
// Signed long compare, register-register: 64-bit cmp.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
11981 
// Signed long compare against zero.
// NOTE(review): the zero operand is declared immI0 although the rule matches
// CmpL (a long compare) — confirm an immL0 operand was not intended.  Also
// the format prints "tst" while the encoding emits a compare-with-zero via
// aarch64_enc_cmp_imm_addsub; confirm the mnemonic shown is intentional.
instruct compL_reg_immI0(rFlagsReg cr, iRegL op1, immI0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
11995 
// Signed long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
12009 
// Signed long compare against a general immediate (may require a constant
// materialization, hence 2 * INSN_COST).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
12023 
// Pointer compare: unsigned flags register class, since pointer ordering
// is unsigned.
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
12037 
// Compressed-pointer (narrow oop) compare; unsigned flags like CmpP.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
12051 
// Pointer null test: compare a pointer register against the constant
// null (immP0).
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}
12065 
// Narrow-oop null test: compare a compressed pointer register against
// the constant null (immN0).
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
12079 
12080 // FP comparisons
12081 //
12082 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
12083 // using normal cmpOp. See declaration of rFlagsReg for details.
12084 
// Single-precision float compare, register-register (fcmps); sets the
// normal flags register per the file comment above.
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
12098 
// Single-precision float compare against the constant +0.0 (fcmps with the
// zero-operand form).
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    // Plain 0.0 literal: the Java-style 'D' suffix is not standard C++ and
    // newer GCC parses 0.0D as a decimal-float literal.
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
12112 // FROM HERE
12113 
// Double-precision float compare, register-register (fcmpd).
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
12127 
// Double-precision float compare against the constant +0.0 (fcmpd with the
// zero-operand form).
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    // Plain 0.0 literal: the Java-style 'D' suffix is not standard C++ and
    // newer GCC parses 0.0D as a decimal-float literal.
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
12141 
// Three-way float compare (CmpF3): $dst <- -1 / 0 / +1.  fcmps sets the
// flags; csinvw produces 0 on EQ else -1; csnegw keeps -1 when LT (less or
// unordered) else negates to +1.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
12169 
// Three-way double compare (CmpD3): $dst <- -1 / 0 / +1, same csinvw/csnegw
// sequence as compF3_reg_reg but with fcmpd.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
12196 
// Three-way float compare against the constant +0.0: $dst <- -1 / 0 / +1.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    // Plain 0.0 literal: the non-standard 'D' suffix is rejected or
    // misparsed (decimal float) by newer compilers.
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
12223 
// Three-way double compare against the constant +0.0: $dst <- -1 / 0 / +1.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    // Plain 0.0 literal: the non-standard 'D' suffix is rejected or
    // misparsed (decimal float) by newer compilers.
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
12249 
// CmpLTMask: $dst <- (p < q) ? -1 : 0.  csetw produces 0/1 from the signed
// LT condition, then subw negates it into an all-ones/all-zeros mask.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12270 
// CmpLTMask against zero: a single arithmetic shift right by 31 replicates
// the sign bit, yielding -1 when src < 0 and 0 otherwise.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
12286 
12287 // ============================================================================
12288 // Max and Min
12289 
// Signed int minimum: cmpw then conditional select of src1 on LT,
// src2 otherwise.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 lt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
12314 // FROM HERE
12315 
// Signed int maximum: cmpw then conditional select of src1 on GT,
// src2 otherwise.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 gt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
12340 
12341 // ============================================================================
12342 // Branch Instructions
12343 
12344 // Direct Branch.
// Unconditional direct branch (Goto).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
12358 
12359 // Conditional Near Branch
// Conditional branch on signed condition codes (b.<cond>).
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
12379 
12380 // Conditional Near Branch Unsigned
// Conditional branch on unsigned condition codes (cmpOpU / rFlagsRegU).
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
12400 
12401 // Make use of CBZ and CBNZ.  These instructions, as well as being
12402 // shorter than (cmp; branch), have the additional benefit of not
12403 // killing the flags.
12404 
// Fused int compare-with-zero and branch using cbzw/cbnzw; restricted by
// the predicate to eq/ne tests.  cbz/cbnz do not write the flags.
instruct cmpI_imm0_branch(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // EQ -> branch when zero (cbzw), NE -> branch when non-zero (cbnzw).
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
12423 
// Fused long compare-with-zero and branch using 64-bit cbz/cbnz;
// eq/ne tests only per the predicate.
instruct cmpL_imm0_branch(cmpOp cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // EQ -> branch when zero (cbz), NE -> branch when non-zero (cbnz).
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
12442 
// Fused pointer null-test and branch using 64-bit cbz/cbnz;
// eq/ne tests only per the predicate.
instruct cmpP_imm0_branch(cmpOp cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // EQ -> branch when null (cbz), NE -> branch when non-null (cbnz).
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
12461 
12462 // Conditional Far Branch
12463 // Conditional Far Branch Unsigned
12464 // TODO: fixme
12465 
12466 // counted loop end branch near
// Counted-loop back-branch, signed conditions; same encoding as branchCon.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
12482 
12483 // counted loop end branch near Unsigned
// Counted-loop back-branch, unsigned conditions; same encoding as branchConU.
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
12499 
12500 // counted loop end branch far
12501 // counted loop end branch far unsigned
12502 // TODO: fixme
12503 
12504 // ============================================================================
12505 // inlined locking and unlocking
12506 
// Inlined monitor enter (FastLock): sets flags for the caller to test;
// tmp and tmp2 are scratch registers clobbered by the encoding.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
12521 
// Inlined monitor exit (FastUnlock); tmp and tmp2 are clobbered scratch.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
12534 
12535 
12536 // ============================================================================
12537 // Safepoint Instructions
12538 
12539 // TODO
12540 // provide a near and far version of this code
12541 
// Safepoint poll: a load from the polling page; the VM unmaps the page to
// trap threads at safepoints (relocInfo::poll_type marks the site).
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
12554 
12555 
12556 // ============================================================================
12557 // Procedure Call/Return Instructions
12558 
12559 // Call Java Static Instruction
12560 
// Static Java call.  The predicate excludes method-handle invokes, which
// are matched by CallStaticJavaDirectHandle below.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  predicate(!((CallStaticJavaNode*)n)->is_method_handle_invoke());

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
12578 
12579 // TO HERE
12580 
12581 // Call Java Static Instruction (method handle version)
12582 
// Static Java call, method-handle invoke variant (predicate is the
// complement of CallStaticJavaDirect's).
// NOTE(review): the reg_mh_save operand is declared but not referenced by
// the effect or encoding here — presumably reserved for the MH frame-pointer
// save convention; confirm against the encoding class.
instruct CallStaticJavaDirectHandle(method meth, iRegP_FP reg_mh_save)
%{
  match(CallStaticJava);

  effect(USE meth);

  predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// (methodhandle) ==> " %}

  ins_encode( aarch64_enc_java_handle_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
12600 
12601 // Call Java Dynamic Instruction
// Dynamic (virtual/interface via inline cache) Java call.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
12617 
12618 // Call Runtime Instruction
12619 
// Call into the VM runtime (CallRuntime node).
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
12634 
12635 // Call Runtime Instruction
12636 
// Leaf runtime call (no safepoint/oop-map machinery at the call site).
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
12651 
12652 // Call Runtime Instruction
12653 
// Leaf runtime call that does not use floating-point arguments/results.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
12668 
12669 // Tail Call; Jump from runtime stub to Java code.
12670 // Also known as an 'interprocedural jump'.
12671 // Target of jump will eventually return to caller.
12672 // TailJump below removes the return address.
// Indirect tail call (interprocedural jump); method_oop carries the callee
// method for the target's prologue, per the comment block above.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
12685 
// Tail jump used for exception forwarding: the exception oop is pinned
// to r0 (iRegP_R0) for the handler being jumped to.
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
12698 
12699 // Create exception oop: created by stack-crawling runtime code.
12700 // Created exception is now available to this handler, and is setup
12701 // just prior to jumping to this handler. No code emitted.
12702 // TODO check
12703 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// CreateEx: the exception oop is already in r0 when control reaches the
// handler, so this emits no code (size 0); it only informs the register
// allocator of the definition.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
12716 
12717 // Rethrow exception: The exception oop will come in the first
12718 // argument position. Then JUMP (not call) to the rethrow stub code.
// Rethrow: jump (not call) to the rethrow stub; the exception oop arrives
// in the first argument register per the comment above.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
12729 
12730 
12731 // Return Instruction
12732 // epilog node loads ret address into lr as part of frame pop
// Method return; the epilog has already restored lr (see comment above).
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
12743 
12744 // Die now.
// Halt: emits a breakpoint (brk #999) to trap if control ever reaches
// code the compiler proved unreachable.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // TODO
    // implement proper trap call here
    __ brk(999);
  %}

  ins_pipe(pipe_class_default);
%}
12759 
12760 // ============================================================================
12761 // Partial Subtype Check
12762 //
12763 // superklass array for an instance of the superklass.  Set a hidden
12764 // internal cache on a hit (cache is checked with exposed code in
12765 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
12766 // encoding ALSO sets flags.
12767 
// Partial subtype check with fixed register bindings (sub=r4, super=r0,
// temp=r2, result=r5).  opcode(0x1) requests zeroing of the result
// register on a hit; the encoding also sets flags (see header comment).
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
12782 
// Flags-only form: matches (CmpP (PartialSubtypeCheck ...) null) so only
// the condition codes are consumed; result and temp are clobbered.
// opcode(0x0) suppresses zeroing the result register on a hit.
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
12797 
// StrComp intrinsic: delegates to MacroAssembler::string_compare with
// fixed register bindings; tmp1 is clobbered scratch.
instruct string_compare(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
12812 
// StrIndexOf intrinsic, variable needle length: the -1 passed as icnt2
// tells MacroAssembler::string_indexof the count is in cnt2.
instruct string_indexof(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
12830 
// StrIndexOf intrinsic, constant needle length <= 4 (immI_le_4): the count
// is passed as the compile-time constant icnt2 and the cnt2 register slot
// is filled with zr.
instruct string_indexof_con(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegI tmp1, iRegI tmp2,
                 iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
12850 
// StrEquals intrinsic: delegates to MacroAssembler::string_equals;
// tmp is clobbered scratch.
instruct string_equals(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, iRegP_R10 tmp, rFlagsReg cr)
%{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(KILL tmp, USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result    // KILL $tmp" %}
  ins_encode %{
    __ string_equals($str1$$Register, $str2$$Register,
                      $cnt$$Register, $result$$Register,
                      $tmp$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
12865 
// AryEq intrinsic: char-array equality via
// MacroAssembler::char_arrays_equals; tmp is clobbered scratch.
instruct array_equals(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ char_arrays_equals($ary1$$Register, $ary2$$Register,
                          $result$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
12879 
12880 // encode char[] to byte[] in ISO_8859_1
12881 instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
12882                           vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
12883                           vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
12884                           iRegI_R0 result, rFlagsReg cr)
12885 %{
12886   match(Set result (EncodeISOArray src (Binary dst len)));
12887   effect(USE_KILL src, USE_KILL dst, USE_KILL len,
12888          KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);
12889 
12890   format %{ "Encode array $src,$dst,$len -> $result" %}
12891   ins_encode %{
12892     __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
12893          $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
12894          $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
12895   %}
12896   ins_pipe( pipe_class_memory );
12897 %}
12898 
12899 // ============================================================================
12900 // This name is KNOWN by the ADLC and cannot be changed.
12901 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
12902 // for this guy.
12903 instruct tlsLoadP(thread_RegP dst)
12904 %{
12905   match(Set dst (ThreadLocal));
12906 
12907   ins_cost(0);
12908 
12909   format %{ " -- \t// $dst=Thread::current(), empty" %}
12910 
12911   size(0);
12912 
12913   ins_encode( /*empty*/ );
12914 
12915   ins_pipe(pipe_class_empty);
12916 %}
12917 
12918 
12919 
12920 //----------PEEPHOLE RULES-----------------------------------------------------
12921 // These must follow all instruction definitions as they use the names
12922 // defined in the instructions definitions.
12923 //
12924 // peepmatch ( root_instr_name [preceding_instruction]* );
12925 //
12926 // peepconstraint %{
12927 // (instruction_number.operand_name relational_op instruction_number.operand_name
12928 //  [, ...] );
12929 // // instruction numbers are zero-based using left to right order in peepmatch
12930 //
12931 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
12932 // // provide an instruction_number.operand_name for each operand that appears
12933 // // in the replacement instruction's match rule
12934 //
12935 // ---------VM FLAGS---------------------------------------------------------
12936 //
12937 // All peephole optimizations can be turned off using -XX:-OptoPeephole
12938 //
12939 // Each peephole rule is given an identifying number starting with zero and
12940 // increasing by one in the order seen by the parser.  An individual peephole
12941 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
12942 // on the command-line.
12943 //
12944 // ---------CURRENT LIMITATIONS----------------------------------------------
12945 //
12946 // Only match adjacent instructions in same basic block
12947 // Only equality constraints
12948 // Only constraints between operands, not (0.dest_reg == RAX_enc)
12949 // Only one replacement instruction
12950 //
12951 // ---------EXAMPLE----------------------------------------------------------
12952 //
12953 // // pertinent parts of existing instructions in architecture description
12954 // instruct movI(iRegINoSp dst, iRegI src)
12955 // %{
12956 //   match(Set dst (CopyI src));
12957 // %}
12958 //
12959 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
12960 // %{
12961 //   match(Set dst (AddI dst src));
12962 //   effect(KILL cr);
12963 // %}
12964 //
12965 // // Change (inc mov) to lea
12966 // peephole %{
//   // increment preceded by register-register move
12968 //   peepmatch ( incI_iReg movI );
12969 //   // require that the destination register of the increment
12970 //   // match the destination register of the move
12971 //   peepconstraint ( 0.dst == 1.dst );
12972 //   // construct a replacement instruction that sets
12973 //   // the destination to ( move's source register + one )
12974 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
12975 // %}
12976 //
12977 
12978 // Implementation no longer uses movX instructions since
12979 // machine-independent system no longer uses CopyX nodes.
12980 //
12981 // peephole
12982 // %{
12983 //   peepmatch (incI_iReg movI);
12984 //   peepconstraint (0.dst == 1.dst);
12985 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
12986 // %}
12987 
12988 // peephole
12989 // %{
12990 //   peepmatch (decI_iReg movI);
12991 //   peepconstraint (0.dst == 1.dst);
12992 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
12993 // %}
12994 
12995 // peephole
12996 // %{
12997 //   peepmatch (addI_iReg_imm movI);
12998 //   peepconstraint (0.dst == 1.dst);
12999 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
13000 // %}
13001 
13002 // peephole
13003 // %{
13004 //   peepmatch (incL_iReg movL);
13005 //   peepconstraint (0.dst == 1.dst);
13006 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
13007 // %}
13008 
13009 // peephole
13010 // %{
13011 //   peepmatch (decL_iReg movL);
13012 //   peepconstraint (0.dst == 1.dst);
13013 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
13014 // %}
13015 
13016 // peephole
13017 // %{
13018 //   peepmatch (addL_iReg_imm movL);
13019 //   peepconstraint (0.dst == 1.dst);
13020 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
13021 // %}
13022 
13023 // peephole
13024 // %{
13025 //   peepmatch (addP_iReg_imm movP);
13026 //   peepconstraint (0.dst == 1.dst);
13027 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
13028 // %}
13029 
13030 // // Change load of spilled value to only a spill
13031 // instruct storeI(memory mem, iRegI src)
13032 // %{
13033 //   match(Set mem (StoreI mem src));
13034 // %}
13035 //
13036 // instruct loadI(iRegINoSp dst, memory mem)
13037 // %{
13038 //   match(Set dst (LoadI mem));
13039 // %}
13040 //
13041 
13042 //----------SMARTSPILL RULES---------------------------------------------------
13043 // These must follow all instruction definitions as they use the names
13044 // defined in the instructions definitions.
13045 
13046 // Local Variables:
13047 // mode: c++
13048 // End: