1 //
   2 // Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
  31 // architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
  71 //   r27-r32 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
  74 // as regards Java usage, we don't use any callee save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
  81 reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
  82 reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
  83 reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
  84 reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
  85 reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
  86 reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
  87 reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
  88 reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
  89 reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
  90 reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
  91 reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
  92 reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
  93 reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
  94 reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
  95 reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
  96 reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
  97 reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
  98 reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
  99 reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
 100 reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
 101 reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
 102 reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
 103 reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
 104 reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
 105 reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
 106 reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
 107 reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
 108 reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
 109 reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
 110 reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
 111 reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
 112 reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
 113 reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
 114 reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
 115 reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
 116 reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
 117 reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
 118 reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
 119 reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
 120 reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
 121 reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
 122 reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
 123 reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
 124 reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
 125 reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
 126 reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
 127 reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
 128 reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
 129 reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
 130 reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
 131 reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
 132 reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
 133 reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
 134 reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
 135 reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
 136 reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
 137 reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
 138 reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
 139 reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
 140 reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
 155 // AArch64 has 32 floating-point registers. Each can store a vector of
 156 // single or double precision floating-point values up to 8 * 32
 157 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 158 // use the first float or double element of the vector.
 159 
 160 // for Java use, float registers v0-v15 are always save-on-call (even
 161 // though the platform ABI treats v8-v15 as callee save). float registers
 162 // v16-v31 are SOC as per the platform spec
 163 
// FP/SIMD registers v0-v31; Vn_H is the virtual upper 32-bit half
// required by ADL's double-register pairing rules (see above).
// v8-v15 are SOE for the C convention (ABI callee-saved) but SOC for
// Java calls; all other V registers are SOC for both conventions.
 164   reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()         );
 165   reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next() );
 166   reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()         );
 167   reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next() );
 168   reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()         );
 169   reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next() );
 170   reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()         );
 171   reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next() );
 172   reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()         );
 173   reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next() );
 174   reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()         );
 175   reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next() );
 176   reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()         );
 177   reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next() );
 178   reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()         );
 179   reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next() );
 180   reg_def V8   ( SOC, SOE, Op_RegF,  8, v8->as_VMReg()         );
 181   reg_def V8_H ( SOC, SOE, Op_RegF,  8, v8->as_VMReg()->next() );
 182   reg_def V9   ( SOC, SOE, Op_RegF,  9, v9->as_VMReg()         );
 183   reg_def V9_H ( SOC, SOE, Op_RegF,  9, v9->as_VMReg()->next() );
 184   reg_def V10  ( SOC, SOE, Op_RegF, 10, v10->as_VMReg()        );
 185   reg_def V10_H( SOC, SOE, Op_RegF, 10, v10->as_VMReg()->next());
 186   reg_def V11  ( SOC, SOE, Op_RegF, 11, v11->as_VMReg()        );
 187   reg_def V11_H( SOC, SOE, Op_RegF, 11, v11->as_VMReg()->next());
 188   reg_def V12  ( SOC, SOE, Op_RegF, 12, v12->as_VMReg()        );
 189   reg_def V12_H( SOC, SOE, Op_RegF, 12, v12->as_VMReg()->next());
 190   reg_def V13  ( SOC, SOE, Op_RegF, 13, v13->as_VMReg()        );
 191   reg_def V13_H( SOC, SOE, Op_RegF, 13, v13->as_VMReg()->next());
 192   reg_def V14  ( SOC, SOE, Op_RegF, 14, v14->as_VMReg()        );
 193   reg_def V14_H( SOC, SOE, Op_RegF, 14, v14->as_VMReg()->next());
 194   reg_def V15  ( SOC, SOE, Op_RegF, 15, v15->as_VMReg()        );
 195   reg_def V15_H( SOC, SOE, Op_RegF, 15, v15->as_VMReg()->next());
 196   reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()        );
 197   reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next());
 198   reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()        );
 199   reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next());
 200   reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()        );
 201   reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next());
 202   reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()        );
 203   reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next());
 204   reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()        );
 205   reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next());
 206   reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()        );
 207   reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next());
 208   reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()        );
 209   reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next());
 210   reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()        );
 211   reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next());
 212   reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()        );
 213   reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next());
 214   reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()        );
 215   reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next());
 216   reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()        );
 217   reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next());
 218   reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()        );
 219   reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next());
 220   reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()        );
 221   reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next());
 222   reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()        );
 223   reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next());
 224   reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()        );
 225   reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next());
 226   reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()        );
 227   reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next());
 228 
 229 // ----------------------------
 230 // Special Registers
 231 // ----------------------------
 232 
 233 // the AArch64 CPSR status flag register is not directly accessible as
 234 // an instruction operand. the FPSR status flag register is a system
 235 // register which can be written/read using MSR/MRS but again does not
 236 // appear as an operand (a code identifying the FPSR occurs as an
 237 // immediate value in the instruction).
 238 
// Pseudo-register for the condition flags: not addressable as an
// instruction operand (see above), so it gets a Bad VMReg encoding.
 239 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 240 
 241 
 242 // Specify priority of register selection within phases of register
 243 // allocation.  Highest priority is first.  A useful heuristic is to
 244 // give registers a low priority when they are required by machine
 245 // instructions, like EAX and EDX on I486, and choose no-save registers
 246 // before save-on-call, & save-on-call before save-on-entry.  Registers
 247 // which participate in fixed calling sequences should come last.
 248 // Registers which are used as pairs must fall on an even boundary.
 249 
// Integer register allocation order, highest priority first (see the
// comment above): freely-usable volatiles, then argument registers,
// then C-callee-saved, with the fixed-role (non-allocatable)
// registers last.
 250 alloc_class chunk0(
 251     // volatiles
 252     R10, R10_H,
 253     R11, R11_H,
 254     R12, R12_H,
 255     R13, R13_H,
 256     R14, R14_H,
 257     R15, R15_H,
 258     R16, R16_H,
 259     R17, R17_H,
 260     R18, R18_H,
 261 
 262     // arg registers
 263     R0, R0_H,
 264     R1, R1_H,
 265     R2, R2_H,
 266     R3, R3_H,
 267     R4, R4_H,
 268     R5, R5_H,
 269     R6, R6_H,
 270     R7, R7_H,
 271 
 272     // non-volatiles
 273     R19, R19_H,
 274     R20, R20_H,
 275     R21, R21_H,
 276     R22, R22_H,
 277     R23, R23_H,
 278     R24, R24_H,
 279     R25, R25_H,
 280     R26, R26_H,
 281 
 282     // non-allocatable registers
 283 
 284     R27, R27_H, // heapbase
 285     R28, R28_H, // thread
 286     R29, R29_H, // fp
 287     R30, R30_H, // lr
 288     R31, R31_H, // sp
 289 );
 290 
// FP/SIMD register allocation order: no-save v16-v31 first, then the
// argument registers v0-v7, then the C-callee-saved v8-v15.
 291 alloc_class chunk1(
 292 
 293     // no save
 294     V16, V16_H,
 295     V17, V17_H,
 296     V18, V18_H,
 297     V19, V19_H,
 298     V20, V20_H,
 299     V21, V21_H,
 300     V22, V22_H,
 301     V23, V23_H,
 302     V24, V24_H,
 303     V25, V25_H,
 304     V26, V26_H,
 305     V27, V27_H,
 306     V28, V28_H,
 307     V29, V29_H,
 308     V30, V30_H,
 309     V31, V31_H,
 310 
 311     // arg registers
 312     V0, V0_H,
 313     V1, V1_H,
 314     V2, V2_H,
 315     V3, V3_H,
 316     V4, V4_H,
 317     V5, V5_H,
 318     V6, V6_H,
 319     V7, V7_H,
 320 
 321     // non-volatiles
 322     V8, V8_H,
 323     V9, V9_H,
 324     V10, V10_H,
 325     V11, V11_H,
 326     V12, V12_H,
 327     V13, V13_H,
 328     V14, V14_H,
 329     V15, V15_H,
 330 );
 331 
// The flags pseudo-register lives alone in its own chunk.
 332 alloc_class chunk2(RFLAGS);
 333 
 334 //----------Architecture Description Register Classes--------------------------
 335 // Several register classes are automatically defined based upon information in
 336 // this architecture description.
 337 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
 338 // 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
 339 // 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
 340 // 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 341 //
 342 
 343 // Class for all 32 bit integer registers -- excludes SP which will
 344 // never be used as an integer register
// (fp and lr are included here; the no_special classes exclude them)
 345 reg_class any_reg32(
 346     R0,
 347     R1,
 348     R2,
 349     R3,
 350     R4,
 351     R5,
 352     R6,
 353     R7,
 354     R10,
 355     R11,
 356     R12,
 357     R13,
 358     R14,
 359     R15,
 360     R16,
 361     R17,
 362     R18,
 363     R19,
 364     R20,
 365     R21,
 366     R22,
 367     R23,
 368     R24,
 369     R25,
 370     R26,
 371     R27,
 372     R28,
 373     R29,
 374     R30
 375 );
 376 
// Singleton classes pin an instruction operand to one specific
// register.
 377 // Singleton class for R0 int register
 378 reg_class int_r0_reg(R0);
 379 
 380 // Singleton class for R2 int register
 381 reg_class int_r2_reg(R2);
 382 
 383 // Singleton class for R3 int register
 384 reg_class int_r3_reg(R3);
 385 
 386 // Singleton class for R4 int register
 387 reg_class int_r4_reg(R4);
 388 
 389 // Class for all long integer registers (including SP, i.e. R31)
 390 reg_class any_reg(
 391     R0, R0_H,
 392     R1, R1_H,
 393     R2, R2_H,
 394     R3, R3_H,
 395     R4, R4_H,
 396     R5, R5_H,
 397     R6, R6_H,
 398     R7, R7_H,
 399     R10, R10_H,
 400     R11, R11_H,
 401     R12, R12_H,
 402     R13, R13_H,
 403     R14, R14_H,
 404     R15, R15_H,
 405     R16, R16_H,
 406     R17, R17_H,
 407     R18, R18_H,
 408     R19, R19_H,
 409     R20, R20_H,
 410     R21, R21_H,
 411     R22, R22_H,
 412     R23, R23_H,
 413     R24, R24_H,
 414     R25, R25_H,
 415     R26, R26_H,
 416     R27, R27_H,
 417     R28, R28_H,
 418     R29, R29_H,
 419     R30, R30_H,
 420     R31, R31_H
 421 );
 422 
// The no_special classes exclude the fixed-role registers R27-R31
// (heapbase, thread, fp, lr, sp) -- kept visible below as
// commented-out entries.
 423 // Class for all non-special integer registers
 424 reg_class no_special_reg32(
 425     R0,
 426     R1,
 427     R2,
 428     R3,
 429     R4,
 430     R5,
 431     R6,
 432     R7,
 433     R10,
 434     R11,
 435     R12,                        // rmethod
 436     R13,
 437     R14,
 438     R15,
 439     R16,
 440     R17,
 441     R18,
 442     R19,
 443     R20,
 444     R21,
 445     R22,
 446     R23,
 447     R24,
 448     R25,
 449     R26
 450  /* R27, */                     // heapbase
 451  /* R28, */                     // thread
 452  /* R29, */                     // fp
 453  /* R30, */                     // lr
 454  /* R31 */                      // sp
 455 );
 456 
 457 // Class for all non-special long integer registers
 458 reg_class no_special_reg(
 459     R0, R0_H,
 460     R1, R1_H,
 461     R2, R2_H,
 462     R3, R3_H,
 463     R4, R4_H,
 464     R5, R5_H,
 465     R6, R6_H,
 466     R7, R7_H,
 467     R10, R10_H,
 468     R11, R11_H,
 469     R12, R12_H,                 // rmethod
 470     R13, R13_H,
 471     R14, R14_H,
 472     R15, R15_H,
 473     R16, R16_H,
 474     R17, R17_H,
 475     R18, R18_H,
 476     R19, R19_H,
 477     R20, R20_H,
 478     R21, R21_H,
 479     R22, R22_H,
 480     R23, R23_H,
 481     R24, R24_H,
 482     R25, R25_H,
 483     R26, R26_H,
 484  /* R27, R27_H, */              // heapbase
 485  /* R28, R28_H, */              // thread
 486  /* R29, R29_H, */              // fp
 487  /* R30, R30_H, */              // lr
 488  /* R31, R31_H */               // sp
 489 );
 490 
// Singleton 64-bit classes: each pins an operand (lower half plus
// virtual upper half) to one specific register.
 491 // Class for 64 bit register r0
 492 reg_class r0_reg(
 493     R0, R0_H
 494 );
 495 
 496 // Class for 64 bit register r1
 497 reg_class r1_reg(
 498     R1, R1_H
 499 );
 500 
 501 // Class for 64 bit register r2
 502 reg_class r2_reg(
 503     R2, R2_H
 504 );
 505 
 506 // Class for 64 bit register r3
 507 reg_class r3_reg(
 508     R3, R3_H
 509 );
 510 
 511 // Class for 64 bit register r4
 512 reg_class r4_reg(
 513     R4, R4_H
 514 );
 515 
 516 // Class for 64 bit register r5
 517 reg_class r5_reg(
 518     R5, R5_H
 519 );
 520 
 521 // Class for 64 bit register r10
 522 reg_class r10_reg(
 523     R10, R10_H
 524 );
 525 
 526 // Class for 64 bit register r11
 527 reg_class r11_reg(
 528     R11, R11_H
 529 );
 530 
 531 // Class for method register
 532 reg_class method_reg(
 533     R12, R12_H
 534 );
 535 
 536 // Class for heapbase register
 537 reg_class heapbase_reg(
 538     R27, R27_H
 539 );
 540 
 541 // Class for thread register
 542 reg_class thread_reg(
 543     R28, R28_H
 544 );
 545 
 546 // Class for frame pointer register
 547 reg_class fp_reg(
 548     R29, R29_H
 549 );
 550 
 551 // Class for link register
 552 reg_class lr_reg(
 553     R30, R30_H
 554 );
 555 
 556 // Class for long sp register
 557 reg_class sp_reg(
 558   R31, R31_H
 559 );
 560 
 561 // Class for all pointer registers
// (includes the special registers; use no_special_ptr_reg when the
// fixed-role registers must be avoided)
 562 reg_class ptr_reg(
 563     R0, R0_H,
 564     R1, R1_H,
 565     R2, R2_H,
 566     R3, R3_H,
 567     R4, R4_H,
 568     R5, R5_H,
 569     R6, R6_H,
 570     R7, R7_H,
 571     R10, R10_H,
 572     R11, R11_H,
 573     R12, R12_H,
 574     R13, R13_H,
 575     R14, R14_H,
 576     R15, R15_H,
 577     R16, R16_H,
 578     R17, R17_H,
 579     R18, R18_H,
 580     R19, R19_H,
 581     R20, R20_H,
 582     R21, R21_H,
 583     R22, R22_H,
 584     R23, R23_H,
 585     R24, R24_H,
 586     R25, R25_H,
 587     R26, R26_H,
 588     R27, R27_H,
 589     R28, R28_H,
 590     R29, R29_H,
 591     R30, R30_H,
 592     R31, R31_H
 593 );
 594 
 595 // Class for all non_special pointer registers
 596 reg_class no_special_ptr_reg(
 597     R0, R0_H,
 598     R1, R1_H,
 599     R2, R2_H,
 600     R3, R3_H,
 601     R4, R4_H,
 602     R5, R5_H,
 603     R6, R6_H,
 604     R7, R7_H,
 605     R10, R10_H,
 606     R11, R11_H,
 607     R12, R12_H,
 608     R13, R13_H,
 609     R14, R14_H,
 610     R15, R15_H,
 611     R16, R16_H,
 612     R17, R17_H,
 613     R18, R18_H,
 614     R19, R19_H,
 615     R20, R20_H,
 616     R21, R21_H,
 617     R22, R22_H,
 618     R23, R23_H,
 619     R24, R24_H,
 620     R25, R25_H,
 621     R26, R26_H,
 622  /* R27, R27_H, */              // heapbase
 623  /* R28, R28_H, */              // thread
 624  /* R29, R29_H, */              // fp
 625  /* R30, R30_H, */              // lr
 626  /* R31, R31_H */               // sp
 627 );
 628 
 629 // Class for all float registers
// Single precision: only the first 32-bit element of each V register
// is used (see the vector-register comment above).
 630 reg_class float_reg(
 631     V0,
 632     V1,
 633     V2,
 634     V3,
 635     V4,
 636     V5,
 637     V6,
 638     V7,
 639     V8,
 640     V9,
 641     V10,
 642     V11,
 643     V12,
 644     V13,
 645     V14,
 646     V15,
 647     V16,
 648     V17,
 649     V18,
 650     V19,
 651     V20,
 652     V21,
 653     V22,
 654     V23,
 655     V24,
 656     V25,
 657     V26,
 658     V27,
 659     V28,
 660     V29,
 661     V30,
 662     V31
 663 );
 664 
 665 // Double precision float registers have virtual `high halves' that
 666 // are needed by the allocator.
 667 // Class for all double registers
// Each pair Vn, Vn_H covers the 64-bit double view of vector
// register n.
 668 reg_class double_reg(
 669     V0, V0_H,
 670     V1, V1_H,
 671     V2, V2_H,
 672     V3, V3_H,
 673     V4, V4_H,
 674     V5, V5_H,
 675     V6, V6_H,
 676     V7, V7_H,
 677     V8, V8_H,
 678     V9, V9_H,
 679     V10, V10_H,
 680     V11, V11_H,
 681     V12, V12_H,
 682     V13, V13_H,
 683     V14, V14_H,
 684     V15, V15_H,
 685     V16, V16_H,
 686     V17, V17_H,
 687     V18, V18_H,
 688     V19, V19_H,
 689     V20, V20_H,
 690     V21, V21_H,
 691     V22, V22_H,
 692     V23, V23_H,
 693     V24, V24_H,
 694     V25, V25_H,
 695     V26, V26_H,
 696     V27, V27_H,
 697     V28, V28_H,
 698     V29, V29_H,
 699     V30, V30_H,
 700     V31, V31_H
 701 );
 702 
// Singleton classes pinning an operand to one specific V register.
 703 // Class for 128 bit register v0
 704 reg_class v0_reg(
 705     V0, V0_H
 706 );
 707 
 708 // Class for 128 bit register v1
 709 reg_class v1_reg(
 710     V1, V1_H
 711 );
 712 
 713 // Class for 128 bit register v2
 714 reg_class v2_reg(
 715     V2, V2_H
 716 );
 717 
 718 // Class for 128 bit register v3
 719 reg_class v3_reg(
 720     V3, V3_H
 721 );
 722 
 723 // Singleton class for condition codes
 724 reg_class int_flags(RFLAGS);
 725 
 726 %}
 727 
 728 //----------DEFINITION BLOCK---------------------------------------------------
 729 // Define name --> value mappings to inform the ADLC of an integer valued name
 730 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 731 // Format:
 732 //        int_def  <name>         ( <int_value>, <expression>);
 733 // Generated Code in ad_<arch>.hpp
 734 //        #define  <name>   (<expression>)
 735 //        // value == <int_value>
 736 // Generated code in ad_<arch>.cpp adlc_verification()
 737 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 738 //
 739 
 740 // we follow the ppc-aix port in using a simple cost model which ranks
 741 // register operations as cheap, memory ops as more expensive and
 742 // branches as most expensive. the first two have a low as well as a
 743 // normal cost. huge cost appears to be a way of saying don't do
 744 // something
 745 
 746 definitions %{
 747   // The default cost (of a register move instruction).
 748   int_def INSN_COST            (    100,     100);
  // Branches and calls cost twice a register op; volatile memory
  // references cost ten times (see the cost-model comment above).
 749   int_def BRANCH_COST          (    200,     2 * INSN_COST);
 750   int_def CALL_COST            (    200,     2 * INSN_COST);
 751   int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
 752 %}
 753 
 754 
 755 //----------SOURCE BLOCK-------------------------------------------------------
 756 // This is a block of C++ code which provides values, functions, and
 757 // definitions necessary in the rest of the architecture description
 758 
 759 source_hpp %{
 760 
// Platform hooks queried by Compile::shorten_branches.  This port
// emits no call trampoline stubs, so both queries return zero.
 761 class CallStubImpl {
 762 
 763   //--------------------------------------------------------------
 764   //---<  Used for optimization in Compile::shorten_branches  >---
 765   //--------------------------------------------------------------
 766 
 767  public:
 768   // Size of call trampoline stub.
 769   static uint size_call_trampoline() {
 770     return 0; // no call trampolines on this platform
 771   }
 772 
 773   // number of relocations needed by a call trampoline stub
 774   static uint reloc_call_trampoline() {
 775     return 0; // no call trampolines on this platform
 776   }
 777 };
 778 
// Platform hooks for emitting and sizing the exception and deopt
// handler stubs; the emit_* bodies are presumably defined elsewhere
// in this file (not visible in this chunk).
 779 class HandlerImpl {
 780 
 781  public:
 782 
 783   static int emit_exception_handler(CodeBuffer &cbuf);
 784   static int emit_deopt_handler(CodeBuffer& cbuf);
 785 
 786   static uint size_exception_handler() {
  // Sized as a single (possibly far) branch.
 787     return MacroAssembler::far_branch_size();
 788   }
 789 
 790   static uint size_deopt_handler() {
 791     // count one adr and one far branch instruction
  // NOTE(review): 4 words assumes a worst-case 3-insn far branch plus
  // one adr -- confirm this stays in sync with far_branch_size().
 792     return 4 * NativeInstruction::instruction_size;
 793   }
 794 };
 795 
 796   // graph traversal helpers
  // Each returns the adjacent membar (or NULL -- presumably; bodies
  // not visible here), passing back its control and memory
  // projections through ctl/mem.
 797   MemBarNode *has_parent_membar(const Node *n,
 798                                 ProjNode *&ctl, ProjNode *&mem);
 799   MemBarNode *has_child_membar(const MemBarNode *n,
 800                                ProjNode *&ctl, ProjNode *&mem);
 801 
 802   // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
 803   bool unnecessary_acquire(const Node *barrier);
 804   bool needs_acquiring_load(const Node *load);
 805 
 806   // predicates controlling emit of str<x>/stlr<x> and associated dmbs
 807   bool unnecessary_release(const Node *barrier);
 808   bool unnecessary_volatile(const Node *barrier);
 809   bool needs_releasing_store(const Node *store);
 810 
 811   // Use barrier instructions rather than load acquire / store
 812   // release.
  // false => prefer ldar<x>/stlr<x> over explicit dmb sequences.
 813   const bool UseBarriersForVolatile = false;
 814   // Use barrier instructions for unsafe volatile gets rather than
 815   // trying to identify an exact signature for them
 816   const bool UseBarriersForUnsafeVolatileGet = false;
 817 %}
 818 
 819 source %{
 820 
 821   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
 822   // use to implement volatile reads and writes. For a volatile read
 823   // we simply need
 824   //
 825   //   ldar<x>
 826   //
 827   // and for a volatile write we need
 828   //
 829   //   stlr<x>
 830   // 
 831   // Alternatively, we can implement them by pairing a normal
 832   // load/store with a memory barrier. For a volatile read we need
 833   // 
 834   //   ldr<x>
 835   //   dmb ishld
 836   //
 837   // for a volatile write
 838   //
 839   //   dmb ish
 840   //   str<x>
 841   //   dmb ish
 842   //
 843   // In order to generate the desired instruction sequence we need to
 844   // be able to identify specific 'signature' ideal graph node
 845   // sequences which i) occur as a translation of a volatile reads or
 846   // writes and ii) do not occur through any other translation or
 847 // graph transformation. We can then provide alternative adlc
 848   // matching rules which translate these node sequences to the
 849   // desired machine code sequences. Selection of the alternative
 850   // rules can be implemented by predicates which identify the
 851   // relevant node sequences.
 852   //
 853   // The ideal graph generator translates a volatile read to the node
 854   // sequence
 855   //
 856   //   LoadX[mo_acquire]
 857   //   MemBarAcquire
 858   //
 859   // As a special case when using the compressed oops optimization we
 860   // may also see this variant
 861   //
 862   //   LoadN[mo_acquire]
 863   //   DecodeN
 864   //   MemBarAcquire
 865   //
 866   // A volatile write is translated to the node sequence
 867   //
 868   //   MemBarRelease
 869   //   StoreX[mo_release]
 870   //   MemBarVolatile
 871   //
 872   // n.b. the above node patterns are generated with a strict
 873   // 'signature' configuration of input and output dependencies (see
 874   // the predicates below for exact details). The two signatures are
 875   // unique to translated volatile reads/stores -- they will not
 876   // appear as a result of any other bytecode translation or inlining
 877   // nor as a consequence of optimizing transforms.
 878   //
 879   // We also want to catch inlined unsafe volatile gets and puts and
 880   // be able to implement them using either ldar<x>/stlr<x> or some
 881   // combination of ldr<x>/stlr<x> and dmb instructions.
 882   //
 883   // Inlined unsafe volatiles puts manifest as a minor variant of the
 884   // normal volatile put node sequence containing an extra cpuorder
 885   // membar
 886   //
 887   //   MemBarRelease
 888   //   MemBarCPUOrder
 889   //   StoreX[mo_release]
 890   //   MemBarVolatile
 891   //
 892   // n.b. as an aside, the cpuorder membar is not itself subject to
 893   // matching and translation by adlc rules.  However, the rule
 894   // predicates need to detect its presence in order to correctly
 895   // select the desired adlc rules.
 896   //
 897   // Inlined unsafe volatiles gets manifest as a somewhat different
 898   // node sequence to a normal volatile get
 899   //
 900   //   MemBarCPUOrder
 901   //        ||       \\
 902   //   MemBarAcquire LoadX[mo_acquire]
 903   //        ||
 904   //   MemBarCPUOrder
 905   //
 906   // In this case the acquire membar does not directly depend on the
 907   // load. However, we can be sure that the load is generated from an
 908   // inlined unsafe volatile get if we see it dependent on this unique
 909   // sequence of membar nodes. Similarly, given an acquire membar we
 910   // can know that it was added because of an inlined unsafe volatile
 911   // get if it is fed and feeds a cpuorder membar and if its feed
 912   // membar also feeds an acquiring load.
 913   //
 914   // So, where we can identify these volatile read and write
 915   // signatures we can choose to plant either of the above two code
 916   // sequences. For a volatile read we can simply plant a normal
 917   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
 918   // also choose to inhibit translation of the MemBarAcquire and
 919   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
 920   //
  // When we recognise a volatile store signature we can choose to
  // plant a dmb ish as a translation for the MemBarRelease, a
 923   // normal str<x> and then a dmb ish for the MemBarVolatile.
 924   // Alternatively, we can inhibit translation of the MemBarRelease
 925   // and MemBarVolatile and instead plant a simple stlr<x>
 926   // instruction.
 927   //
 928   // Of course, the above only applies when we see these signature
 929   // configurations. We still want to plant dmb instructions in any
 930   // other cases where we may see a MemBarAcquire, MemBarRelease or
 931   // MemBarVolatile. For example, at the end of a constructor which
 932   // writes final/volatile fields we will see a MemBarRelease
 933   // instruction and this needs a 'dmb ish' lest we risk the
 934   // constructed object being visible without making the
 935   // final/volatile field writes visible.
 936   //
 937   // n.b. the translation rules below which rely on detection of the
 938   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
 939   // If we see anything other than the signature configurations we
  // always just translate the loads and stores to ldr<x> and str<x>
 941   // and translate acquire, release and volatile membars to the
 942   // relevant dmb instructions.
 943   //
 944   // n.b.b as a case in point for the above comment, the current
 945   // predicates don't detect the precise signature for certain types
 946   // of volatile object stores (where the heap_base input type is not
 947   // known at compile-time to be non-NULL). In those cases the
 948   // MemBarRelease and MemBarVolatile bracket an if-then-else sequence
 949   // with a store in each branch (we need a different store depending
 950   // on whether heap_base is actually NULL). In such a case we will
 951   // just plant a dmb both before and after the branch/merge. The
 952   // predicate could (and probably should) be fixed later to also
 953   // detect this case.
 954 
 955   // graph traversal helpers
 956 
 957   // if node n is linked to a parent MemBarNode by an intervening
 958   // Control or Memory ProjNode return the MemBarNode otherwise return
 959   // NULL.
 960   //
 961   // n may only be a Load or a MemBar.
 962   //
 963   // The ProjNode* references c and m are used to return the relevant
 964   // nodes.
 965 
 966   MemBarNode *has_parent_membar(const Node *n, ProjNode *&c, ProjNode *&m)
 967   {
 968     Node *ctl = NULL;
 969     Node *mem = NULL;
 970     Node *membar = NULL;
 971 
 972     if (n->is_Load()) {
 973       ctl = n->lookup(LoadNode::Control);
 974       mem = n->lookup(LoadNode::Memory);
 975     } else if (n->is_MemBar()) {
 976       ctl = n->lookup(TypeFunc::Control);
 977       mem = n->lookup(TypeFunc::Memory);
 978     } else {
 979         return NULL;
 980     }
 981 
 982     if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj())
 983       return NULL;
 984 
 985     c = ctl->as_Proj();
 986 
 987     membar = ctl->lookup(0);
 988 
 989     if (!membar || !membar->is_MemBar())
 990       return NULL;
 991 
 992     m = mem->as_Proj();
 993 
 994     if (mem->lookup(0) != membar)
 995       return NULL;
 996 
 997     return membar->as_MemBar();
 998   }
 999 
1000   // if n is linked to a child MemBarNode by intervening Control and
1001   // Memory ProjNodes return the MemBarNode otherwise return NULL.
1002   //
  // The ProjNode*& arguments c and m are used to return references to
  // the relevant nodes.
1006 
1007   MemBarNode *has_child_membar(const MemBarNode *n, ProjNode *&c, ProjNode *&m)
1008   {
1009     ProjNode *ctl = n->proj_out(TypeFunc::Control);
1010     ProjNode *mem = n->proj_out(TypeFunc::Memory);
1011 
1012     // MemBar needs to have both a Ctl and Mem projection
1013     if (! ctl || ! mem)
1014       return NULL;
1015 
1016     c = ctl;
1017     m = mem;
1018 
1019     MemBarNode *child = NULL;
1020     Node *x;
1021 
1022     for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1023       x = ctl->fast_out(i);
1024       // if we see a membar we keep hold of it. we may also see a new
1025       // arena copy of the original but it will appear later
1026       if (x->is_MemBar()) {
1027           child = x->as_MemBar();
1028           break;
1029       }
1030     }
1031 
1032     if (child == NULL)
1033       return NULL;
1034 
1035     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1036       x = mem->fast_out(i);
1037       // if we see a membar we keep hold of it. we may also see a new
1038       // arena copy of the original but it will appear later
1039       if (x == child) {
1040         return child;
1041       }
1042     }
1043     return NULL;
1044   }
1045 
1046   // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
1047 
// predicate controlling translation of a MemBarAcquire
//
// returns true if the acquire membar belongs to one of the recognised
// volatile-read signatures described above, in which case the load
// will be translated to an ldar<x> and no dmb need be planted for the
// membar itself. returns false whenever a dmb is required.
bool unnecessary_acquire(const Node *barrier) {
  // assert barrier->is_MemBar();
  if (UseBarriersForVolatile)
    // we need to plant a dmb
    return false;

  // a volatile read derived from bytecode (or also from an inlined
  // SHA field read via LibraryCallKit::load_field_from_object)
  // manifests as a LoadX[mo_acquire] followed by an acquire membar
  // with a bogus read dependency on its preceding load. so in those
  // cases we will find the load node at the PARMS offset of the
  // acquire membar.  n.b. there may be an intervening DecodeN node.
  //
  // a volatile load derived from an inlined unsafe field access
  // manifests as a cpuorder membar with Ctl and Mem projections
  // feeding both an acquire membar and a LoadX[mo_acquire]. The
  // acquire then feeds another cpuorder membar via Ctl and Mem
  // projections. The load has no output dependency on these trailing
  // membars because subsequent nodes inserted into the graph take
  // their control feed from the final membar cpuorder meaning they
  // are all ordered after the load.

  Node *x = barrier->lookup(TypeFunc::Parms);
  if (x) {
    // we are starting from an acquire and it has a fake dependency
    //
    // need to check for
    //
    //   LoadX[mo_acquire]
    //   {  |1   }
    //   {DecodeN}
    //      |Parms
    //   MemBarAcquire*
    //
    // where * tags node we were passed
    // and |k means input k
    if (x->is_DecodeNarrowPtr())
      x = x->in(1);

    return (x->is_Load() && x->as_Load()->is_acquire());
  }

  // only continue if we want to try to match unsafe volatile gets
  if (UseBarriersForUnsafeVolatileGet)
    return false;

  // need to check for
  //
  //     MemBarCPUOrder
  //        ||       \\
  //   MemBarAcquire* LoadX[mo_acquire]
  //        ||
  //   MemBarCPUOrder
  //
  // where * tags node we were passed
  // and || or \\ are Ctl+Mem feeds via intermediate Proj Nodes

  // check for a parent MemBarCPUOrder
  ProjNode *ctl;
  ProjNode *mem;
  MemBarNode *parent = has_parent_membar(barrier, ctl, mem);
  if (!parent || parent->Opcode() != Op_MemBarCPUOrder)
    return false;
  // ensure the proj nodes both feed a LoadX[mo_acquire]
  LoadNode *ld = NULL;
  for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
    x = ctl->fast_out(i);
    // if we see a load we keep hold of it and stop searching
    if (x->is_Load()) {
      ld = x->as_Load();
      break;
    }
  }
  // it must be an acquiring load
  if (! ld || ! ld->is_acquire())
    return false;
  // the same load must also consume the parent's memory projection
  for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
    x = mem->fast_out(i);
    // if we see the same load we drop it and stop searching
    if (x == ld) {
      ld = NULL;
      break;
    }
  }
  // we must have dropped the load
  if (ld)
    return false;
  // check for a child cpuorder membar
  MemBarNode *child  = has_child_membar(barrier->as_MemBar(), ctl, mem);
  if (!child || child->Opcode() != Op_MemBarCPUOrder)
    return false;

  return true;
}
1142 
1143 bool needs_acquiring_load(const Node *n)
1144 {
1145   // assert n->is_Load();
1146   if (UseBarriersForVolatile)
1147     // we use a normal load and a dmb
1148     return false;
1149 
1150   LoadNode *ld = n->as_Load();
1151 
1152   if (!ld->is_acquire())
1153     return false;
1154 
1155   // check if this load is feeding an acquire membar
1156   //
1157   //   LoadX[mo_acquire]
1158   //   {  |1   }
1159   //   {DecodeN}
1160   //      |Parms
1161   //   MemBarAcquire*
1162   //
1163   // where * tags node we were passed
1164   // and |k means input k
1165 
1166   Node *start = ld;
1167   Node *mbacq = NULL;
1168 
1169   // if we hit a DecodeNarrowPtr we reset the start node and restart
1170   // the search through the outputs
1171  restart:
1172 
1173   for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
1174     Node *x = start->fast_out(i);
1175     if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
1176       mbacq = x;
1177     } else if (!mbacq &&
1178                (x->is_DecodeNarrowPtr() ||
1179                 (x->is_Mach() && x->Opcode() == Op_DecodeN))) {
1180       start = x;
1181       goto restart;
1182     }
1183   }
1184 
1185   if (mbacq) {
1186     return true;
1187   }
1188 
1189   // only continue if we want to try to match unsafe volatile gets
1190   if (UseBarriersForUnsafeVolatileGet)
1191     return false;
1192 
1193   // check if Ctl and Proj feed comes from a MemBarCPUOrder
1194   //
1195   //     MemBarCPUOrder
1196   //        ||       \\
1197   //   MemBarAcquire* LoadX[mo_acquire]
1198   //        ||
1199   //   MemBarCPUOrder
1200 
1201   MemBarNode *membar;
1202   ProjNode *ctl;
1203   ProjNode *mem;
1204 
1205   membar = has_parent_membar(ld, ctl, mem);
1206 
1207   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder)
1208     return false;
1209 
1210   // ensure that there is a CPUOrder->Acquire->CPUOrder membar chain
1211 
1212   membar = has_child_membar(membar, ctl, mem);
1213 
1214   if (!membar || !membar->Opcode() == Op_MemBarAcquire)
1215     return false;
1216 
1217   membar = has_child_membar(membar, ctl, mem);
1218   
1219   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder)
1220     return false;
1221 
1222   return true;
1223 }
1224 
// predicate controlling translation of a MemBarRelease
//
// returns true if the release membar belongs to the recognised
// volatile-store signature below, in which case the store will be
// translated to an stlr<x> and no leading dmb need be planted for the
// membar itself. returns false whenever a dmb is required.
bool unnecessary_release(const Node *n) {
  // assert n->is_MemBar();
  if (UseBarriersForVolatile)
    // we need to plant a dmb
    return false;

  // ok, so we can omit this release barrier if it has been inserted
  // as part of a volatile store sequence
  //
  //   MemBarRelease
  //  {      ||      }
  //  {MemBarCPUOrder} -- optional
  //         ||     \\
  //         ||     StoreX[mo_release]
  //         | \     /
  //         | MergeMem
  //         | /
  //   MemBarVolatile
  //
  // where
  //  || and \\ represent Ctl and Mem feeds via Proj nodes
  //  | \ and / indicate further routing of the Ctl and Mem feeds
  //
  // so we need to check that
  //
  // i) the release membar (or its dependent cpuorder membar) feeds
  // control to a store node (via a Control project node)
  //
  // ii) the store is ordered release
  //
  // iii) the release membar (or its dependent cpuorder membar) feeds
  // control to a volatile membar (via the same Control project node)
  //
  // iv) the release membar feeds memory to a merge mem and to the
  // same store (both via a single Memory proj node)
  //
  // v) the store outputs to the merge mem
  //
  // vi) the merge mem outputs to the same volatile membar
  //
  // n.b. if this is an inlined unsafe node then the release membar
  // may feed its control and memory links via an intervening cpuorder
  // membar. this case can be dealt with when we check the release
  // membar projections. if they both feed a single cpuorder membar
  // node continue to make the same checks as above but with the
  // cpuorder membar substituted for the release membar. if they don't
  // both feed a cpuorder membar then the check fails.
  //
  // n.b.b. for an inlined unsafe store of an object in the case where
  // !TypePtr::NULL_PTR->higher_equal(type(heap_base_oop)) we may see
  // an embedded if then else where we expect the store. this is
  // needed to do the right type of store depending on whether
  // heap_base is NULL. We could check for that but for now we can
  // just take the hit of inserting a redundant dmb for this
  // redundant volatile membar

  MemBarNode *barrier = n->as_MemBar();
  ProjNode *ctl;
  ProjNode *mem;
  // check for an intervening cpuorder membar
  MemBarNode *b = has_child_membar(barrier, ctl, mem);
  if (b && b->Opcode() == Op_MemBarCPUOrder) {
    // ok, so start from the dependent cpuorder barrier
    barrier = b;
  }
  // check the ctl and mem flow
  ctl = barrier->proj_out(TypeFunc::Control);
  mem = barrier->proj_out(TypeFunc::Memory);

  // the barrier needs to have both a Ctl and Mem projection
  if (! ctl || ! mem)
    return false;

  Node *x = NULL;
  Node *mbvol = NULL;
  StoreNode * st = NULL;

  // For a normal volatile write the Ctl ProjNode should have output
  // to a MemBarVolatile and a Store marked as releasing
  //
  // n.b. for an inlined unsafe store of an object in the case where
  // !TypePtr::NULL_PTR->higher_equal(type(heap_base_oop)) we may see
  // an embedded if then else where we expect the store. this is
  // needed to do the right type of store depending on whether
  // heap_base is NULL. We could check for that case too but for now
  // we can just take the hit of inserting a dmb and a non-volatile
  // store to implement the volatile store

  for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
    x = ctl->fast_out(i);
    if (x->is_MemBar() && x->Opcode() == Op_MemBarVolatile) {
      // a second volatile membar means this is not the signature
      if (mbvol) {
        return false;
      }
      mbvol = x;
    } else if (x->is_Store()) {
      st = x->as_Store();
      if (! st->is_release()) {
        return false;
      }
    } else if (!x->is_Mach()) {
      // we may see mach nodes added during matching but nothing else
      return false;
    }
  }

  // both the volatile membar and the releasing store must be present
  if (!mbvol || !st)
    return false;

  // the Mem ProjNode should output to a MergeMem and the same Store
  Node *mm = NULL;
  for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
    x = mem->fast_out(i);
    if (!mm && x->is_MergeMem()) {
      mm = x;
    } else if (x != st && !x->is_Mach()) {
      // we may see mach nodes added during matching but nothing else
      return false;
    }
  }

  if (!mm)
    return false;

  // the MergeMem should output to the MemBarVolatile
  for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
    x = mm->fast_out(i);
    if (x != mbvol && !x->is_Mach()) {
      // we may see mach nodes added during matching but nothing else
      return false;
    }
  }

  return true;
}
1360 
1361 bool unnecessary_volatile(const Node *n) {
1362   // assert n->is_MemBar();
1363   if (UseBarriersForVolatile)
1364     // we need to plant a dmb
1365     return false;
1366 
1367   // ok, so we can omit this volatile barrier if it has been inserted
1368   // as part of a volatile store sequence
1369   //
1370   //   MemBarRelease
1371   //  {      ||      }
1372   //  {MemBarCPUOrder} -- optional
1373   //         ||     \\
1374   //         ||     StoreX[mo_release]
1375   //         | \     /
1376   //         | MergeMem
1377   //         | /
1378   //   MemBarVolatile
1379   //
1380   // where
1381   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1382   //  | \ and / indicate further routing of the Ctl and Mem feeds
1383   // 
1384   // we need to check that
1385   //
1386   // i) the volatile membar gets its control feed from a release
1387   // membar (or its dependent cpuorder membar) via a Control project
1388   // node
1389   //
1390   // ii) the release membar (or its dependent cpuorder membar) also
1391   // feeds control to a store node via the same proj node
1392   //
1393   // iii) the store is ordered release
1394   //
1395   // iv) the release membar (or its dependent cpuorder membar) feeds
1396   // memory to a merge mem and to the same store (both via a single
1397   // Memory proj node)
1398   //
1399   // v) the store outputs to the merge mem
1400   //
1401   // vi) the merge mem outputs to the volatile membar
1402   //
1403   // n.b. for an inlined unsafe store of an object in the case where
1404   // !TypePtr::NULL_PTR->higher_equal(type(heap_base_oop)) we may see
1405   // an embedded if then else where we expect the store. this is
1406   // needed to do the right type of store depending on whether
1407   // heap_base is NULL. We could check for that but for now we can
1408   // just take the hit of on inserting a redundant dmb for this
1409   // redundant volatile membar
1410 
1411   MemBarNode *mbvol = n->as_MemBar();
1412   Node *x = n->lookup(TypeFunc::Control);
1413 
1414   if (! x || !x->is_Proj())
1415     return false;
1416 
1417   ProjNode *proj = x->as_Proj();
1418 
1419   x = proj->lookup(0);
1420 
1421   if (!x || !x->is_MemBar())
1422     return false;
1423 
1424   MemBarNode *barrier = x->as_MemBar();
1425 
1426   // if the barrier is a release membar we have what we want. if it is
1427   // a cpuorder membar then we need to ensure that it is fed by a
1428   // release membar in which case we proceed to check the graph below
1429   // this cpuorder membar as the feed
1430 
1431   if (x->Opcode() != Op_MemBarRelease) {
1432     if (x->Opcode() != Op_MemBarCPUOrder)
1433       return false;
1434     ProjNode *ctl;
1435     ProjNode *mem;
1436     MemBarNode *b = has_parent_membar(x, ctl, mem);
1437     if (!b || !b->Opcode() == Op_MemBarRelease)
1438       return false;
1439   }
1440 
1441   ProjNode *ctl = barrier->proj_out(TypeFunc::Control);
1442   ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
1443 
1444   // barrier needs to have both a Ctl and Mem projection
1445   // and we need to have reached it via the Ctl projection
1446   if (! ctl || ! mem || ctl != proj)
1447     return false;
1448 
1449   StoreNode * st = NULL;
1450 
1451   // The Ctl ProjNode should have output to a MemBarVolatile and
1452   // a Store marked as releasing
1453   for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1454     x = ctl->fast_out(i);
1455     if (x->is_MemBar() && x->Opcode() == Op_MemBarVolatile) {
1456       if (x != mbvol) {
1457         return false;
1458       }
1459     } else if (x->is_Store()) {
1460       st = x->as_Store();
1461       if (! st->is_release()) {
1462         return false;
1463       }
1464     } else if (!x->is_Mach()){
1465       // we may see mach nodes added during matching but nothing else
1466       return false;
1467     }
1468   }
1469 
1470   if (!st)
1471     return false;
1472 
1473   // the Mem ProjNode should output to a MergeMem and the same Store
1474   Node *mm = NULL;
1475   for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1476     x = mem->fast_out(i);
1477     if (!mm && x->is_MergeMem()) {
1478       mm = x;
1479     } else if (x != st && !x->is_Mach()) {
1480       // we may see mach nodes added during matching but nothing else
1481       return false;
1482     }
1483   }
1484 
1485   if (!mm)
1486     return false;
1487 
1488   // the MergeMem should output to the MemBarVolatile
1489   for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
1490     x = mm->fast_out(i);
1491     if (x != mbvol && !x->is_Mach()) {
1492       // we may see mach nodes added during matching but nothing else
1493       return false;
1494     }
1495   }
1496 
1497   return true;
1498 }
1499 
1500 
1501 
1502 bool needs_releasing_store(const Node *n)
1503 {
1504   // assert n->is_Store();
1505   if (UseBarriersForVolatile)
1506     // we use a normal store and dmb combination
1507     return false;
1508 
1509   StoreNode *st = n->as_Store();
1510 
1511   if (!st->is_release())
1512     return false;
1513 
1514   // check if this store is bracketed by a release (or its dependent
1515   // cpuorder membar) and a volatile membar
1516   //
1517   //   MemBarRelease
1518   //  {      ||      }
1519   //  {MemBarCPUOrder} -- optional
1520   //         ||     \\
1521   //         ||     StoreX[mo_release]
1522   //         | \     /
1523   //         | MergeMem
1524   //         | /
1525   //   MemBarVolatile
1526   //
1527   // where
1528   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1529   //  | \ and / indicate further routing of the Ctl and Mem feeds
1530   // 
1531 
1532 
1533   Node *x = st->lookup(TypeFunc::Control);
1534 
1535   if (! x || !x->is_Proj())
1536     return false;
1537 
1538   ProjNode *proj = x->as_Proj();
1539 
1540   x = proj->lookup(0);
1541 
1542   if (!x || !x->is_MemBar())
1543     return false;
1544 
1545   MemBarNode *barrier = x->as_MemBar();
1546 
1547   // if the barrier is a release membar we have what we want. if it is
1548   // a cpuorder membar then we need to ensure that it is fed by a
1549   // release membar in which case we proceed to check the graph below
1550   // this cpuorder membar as the feed
1551 
1552   if (x->Opcode() != Op_MemBarRelease) {
1553     if (x->Opcode() != Op_MemBarCPUOrder)
1554       return false;
1555     Node *ctl = x->lookup(TypeFunc::Control);
1556     Node *mem = x->lookup(TypeFunc::Memory);
1557     if (!ctl || !ctl->is_Proj() || !mem || !mem->is_Proj())
1558       return false;
1559     x = ctl->lookup(0);
1560     if (!x || !x->is_MemBar() || !x->Opcode() == Op_MemBarRelease)
1561       return false;
1562     Node *y = mem->lookup(0);
1563     if (!y || y != x)
1564       return false;
1565   }
1566 
1567   ProjNode *ctl = barrier->proj_out(TypeFunc::Control);
1568   ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
1569 
1570   // MemBarRelease needs to have both a Ctl and Mem projection
1571   // and we need to have reached it via the Ctl projection
1572   if (! ctl || ! mem || ctl != proj)
1573     return false;
1574 
1575   MemBarNode *mbvol = NULL;
1576 
1577   // The Ctl ProjNode should have output to a MemBarVolatile and
1578   // a Store marked as releasing
1579   for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1580     x = ctl->fast_out(i);
1581     if (x->is_MemBar() && x->Opcode() == Op_MemBarVolatile) {
1582       mbvol = x->as_MemBar();
1583     } else if (x->is_Store()) {
1584       if (x != st) {
1585         return false;
1586       }
1587     } else if (!x->is_Mach()){
1588       return false;
1589     }
1590   }
1591 
1592   if (!mbvol)
1593     return false;
1594 
1595   // the Mem ProjNode should output to a MergeMem and the same Store
1596   Node *mm = NULL;
1597   for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1598     x = mem->fast_out(i);
1599     if (!mm && x->is_MergeMem()) {
1600       mm = x;
1601     } else if (x != st && !x->is_Mach()) {
1602       return false;
1603     }
1604   }
1605 
1606   if (!mm)
1607     return false;
1608 
1609   // the MergeMem should output to the MemBarVolatile
1610   for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
1611     x = mm->fast_out(i);
1612     if (x != mbvol && !x->is_Mach()) {
1613       return false;
1614     }
1615   }
1616 
1617   return true;
1618 }
1619 
1620 
1621 
1622 #define __ _masm.
1623 
1624 // advance declarations for helper functions to convert register
1625 // indices to register objects
1626 
1627 // the ad file has to provide implementations of certain methods
1628 // expected by the generic code
1629 //
1630 // REQUIRED FUNCTIONALITY
1631 
1632 //=============================================================================
1633 
1634 // !!!!! Special hack to get all types of calls to specify the byte offset
1635 //       from the start of the call to the point where the return address
1636 //       will point.
1637 
1638 int MachCallStaticJavaNode::ret_addr_offset()
1639 {
1640   // call should be a simple bl
1641   // unless this is a method handle invoke in which case it is
1642   // mov(rfp, sp), bl, mov(sp, rfp)
1643   int off = 4;
1644   if (_method_handle_invoke) {
1645     off += 4;
1646   }
1647   return off;
1648 }
1649 
1650 int MachCallDynamicJavaNode::ret_addr_offset()
1651 {
1652   return 16; // movz, movk, movk, bl
1653 }
1654 
1655 int MachCallRuntimeNode::ret_addr_offset() {
1656   // for generated stubs the call will be
1657   //   far_call(addr)
1658   // for real runtime callouts it will be six instructions
1659   // see aarch64_enc_java_to_runtime
1660   //   adr(rscratch2, retaddr)
1661   //   lea(rscratch1, RuntimeAddress(addr)
1662   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
1663   //   blrt rscratch1
1664   CodeBlob *cb = CodeCache::find_blob(_entry_point);
1665   if (cb) {
1666     return MacroAssembler::far_branch_size();
1667   } else {
1668     return 6 * NativeInstruction::instruction_size;
1669   }
1670 }
1671 
1672 // Indicate if the safepoint node needs the polling page as an input
1673 
1674 // the shared code plants the oop data at the start of the generated
1675 // code for the safepoint node and that needs ot be at the load
1676 // instruction itself. so we cannot plant a mov of the safepoint poll
1677 // address followed by a load. setting this to true means the mov is
1678 // scheduled as a prior instruction. that's better for scheduling
1679 // anyway.
1680 
1681 bool SafePointNode::needs_polling_address_input()
1682 {
1683   return true;
1684 }
1685 
1686 //=============================================================================
1687 
1688 #ifndef PRODUCT
// debug-only pretty printer for the breakpoint node
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
1692 #endif
1693 
// emit a single brk instruction for the breakpoint node
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  __ brk(0);
}
1698 
// defer to the generic code to compute the emitted size
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1702 
1703 //=============================================================================
1704 
1705 #ifndef PRODUCT
  // debug-only pretty printer showing how many pad bytes are emitted
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
1709 #endif
1710 
1711   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
1712     MacroAssembler _masm(&cbuf);
1713     for (int i = 0; i < _count; i++) {
1714       __ nop();
1715     }
1716   }
1717 
  // each nop is one machine instruction wide
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
1721 
1722 //=============================================================================
// the constant base node produces no value so it needs no output register
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}
1728 
// no post-register-allocation expansion is needed (and it must never
// be requested, hence the guard in postalloc_expand)
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
1733 
// nothing to emit: the constant table uses absolute addressing so no
// base register needs to be set up (see calculate_table_base_offset)
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}
1737 
// the node emits no code, so its size is zero
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}
1741 
1742 #ifndef PRODUCT
// debug-only pretty printer marking the (empty) encoding
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
1746 #endif
1747 
1748 #ifndef PRODUCT
// debug-only pretty printer for the prolog; the three cases mirror
// the frame-build strategies selected by frame size
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  if (framesize == 0) {
    // Is this even possible?
    st->print("stp  lr, rfp, [sp, #%d]!", -(2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    // small frame: drop sp by an immediate then store the lr/rfp pair
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
  } else {
    // large frame: push the pair first, then drop sp via rscratch1
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
1769 #endif
1770 
// emit the method prolog: a patchable nop, an optional stack bang,
// the frame build, optional simulator notification, then record the
// frame-complete offset and fix up the constant table base offset
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
1806 
// prolog length varies with frame size and flags, so defer to the
// generic size computation
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}
1812 
// the prolog emits no relocatable values
int MachPrologNode::reloc() const
{
  return 0;
}
1817 
1818 //=============================================================================
1819 
#ifndef PRODUCT
// Pretty-print the epilog; the three frame-teardown cases below are
// meant to mirror what MacroAssembler::remove_frame generates
// (no frame body / small immediate / large frame via rscratch1).
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  // Return-path safepoint poll (method compiles only, see emit()).
  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
#endif
1845 
// Emit the compiled-method epilog: tear down the frame, notify the
// simulator (simulator builds), and perform the return-path safepoint poll.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  // Simulator builds only: the method is being re-entered by its caller.
  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  // Poll the safepoint page on return; the relocation type marks this
  // as a poll-return site for the safepoint machinery.
  if (do_polling() && C->is_method_compilation()) {
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
1861 
1862 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
1863   // Variable size. Determine dynamically.
1864   return MachNode::size(ra_);
1865 }
1866 
1867 int MachEpilogNode::reloc() const {
1868   // Return number of relocatable values contained in this instruction.
1869   return 1; // 1 for polling page.
1870 }
1871 
1872 const Pipeline * MachEpilogNode::pipeline() const {
1873   return MachNode::pipeline_class();
1874 }
1875 
1876 // This method seems to be obsolete. It is declared in machnode.hpp
1877 // and defined in all *.ad files, but it is never called. Should we
1878 // get rid of it?
1879 int MachEpilogNode::safepoint_offset() const {
1880   assert(do_polling(), "no return for this epilog node");
1881   return 4;
1882 }
1883 
1884 //=============================================================================
1885 
// Figure out which register class each belongs in: rc_int, rc_float or
// rc_stack.
enum RC { rc_bad, rc_int, rc_float, rc_stack };

// Map an allocator register name to its register class.  The raw
// numeric bounds below depend on the reg_def ordering in the register
// block at the top of this file: the integer register names come
// first, followed by the float register names, then flags, then stack
// slots.
static enum RC rc_class(OptoReg::Name reg) {

  if (reg == OptoReg::Bad) {
    return rc_bad;
  }

  // we have 30 int registers * 2 halves
  // (rscratch1 and rscratch2 are omitted)

  if (reg < 60) {
    return rc_int;
  }

  // we have 32 float register * 2 halves
  if (reg < 60 + 64) {
    return rc_float;
  }

  // Between float regs & stack is the flags regs.
  assert(OptoReg::is_stack(reg), "blow up if spilling flags");

  return rc_stack;
}
1913 
1914 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
1915   Compile* C = ra_->C;
1916 
1917   // Get registers to move.
1918   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
1919   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
1920   OptoReg::Name dst_hi = ra_->get_reg_second(this);
1921   OptoReg::Name dst_lo = ra_->get_reg_first(this);
1922 
1923   enum RC src_hi_rc = rc_class(src_hi);
1924   enum RC src_lo_rc = rc_class(src_lo);
1925   enum RC dst_hi_rc = rc_class(dst_hi);
1926   enum RC dst_lo_rc = rc_class(dst_lo);
1927 
1928   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
1929 
1930   if (src_hi != OptoReg::Bad) {
1931     assert((src_lo&1)==0 && src_lo+1==src_hi &&
1932            (dst_lo&1)==0 && dst_lo+1==dst_hi,
1933            "expected aligned-adjacent pairs");
1934   }
1935 
1936   if (src_lo == dst_lo && src_hi == dst_hi) {
1937     return 0;            // Self copy, no move.
1938   }
1939 
1940   switch (src_lo_rc) {
1941   case rc_int:
1942     if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
1943       if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
1944           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
1945           // 64 bit
1946         if (cbuf) {
1947           MacroAssembler _masm(cbuf);
1948           __ mov(as_Register(Matcher::_regEncode[dst_lo]),
1949                  as_Register(Matcher::_regEncode[src_lo]));
1950         } else if (st) {
1951           st->print("mov  %s, %s\t# shuffle",
1952                     Matcher::regName[dst_lo],
1953                     Matcher::regName[src_lo]);
1954         }
1955       } else {
1956         // 32 bit
1957         if (cbuf) {
1958           MacroAssembler _masm(cbuf);
1959           __ movw(as_Register(Matcher::_regEncode[dst_lo]),
1960                   as_Register(Matcher::_regEncode[src_lo]));
1961         } else if (st) {
1962           st->print("movw  %s, %s\t# shuffle",
1963                     Matcher::regName[dst_lo],
1964                     Matcher::regName[src_lo]);
1965         }
1966       }
1967     } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
1968       if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
1969           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
1970           // 64 bit
1971         if (cbuf) {
1972           MacroAssembler _masm(cbuf);
1973           __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1974                    as_Register(Matcher::_regEncode[src_lo]));
1975         } else if (st) {
1976           st->print("fmovd  %s, %s\t# shuffle",
1977                     Matcher::regName[dst_lo],
1978                     Matcher::regName[src_lo]);
1979         }
1980       } else {
1981         // 32 bit
1982         if (cbuf) {
1983           MacroAssembler _masm(cbuf);
1984           __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1985                    as_Register(Matcher::_regEncode[src_lo]));
1986         } else if (st) {
1987           st->print("fmovs  %s, %s\t# shuffle",
1988                     Matcher::regName[dst_lo],
1989                     Matcher::regName[src_lo]);
1990         }
1991       }
1992     } else {                    // gpr --> stack spill
1993       assert(dst_lo_rc == rc_stack, "spill to bad register class");
1994       int dst_offset = ra_->reg2offset(dst_lo);
1995       if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
1996           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
1997           // 64 bit
1998         if (cbuf) {
1999           MacroAssembler _masm(cbuf);
2000           __ str(as_Register(Matcher::_regEncode[src_lo]),
2001                  Address(sp, dst_offset));
2002         } else if (st) {
2003           st->print("str  %s, [sp, #%d]\t# spill",
2004                     Matcher::regName[src_lo],
2005                     dst_offset);
2006         }
2007       } else {
2008         // 32 bit
2009         if (cbuf) {
2010           MacroAssembler _masm(cbuf);
2011           __ strw(as_Register(Matcher::_regEncode[src_lo]),
2012                  Address(sp, dst_offset));
2013         } else if (st) {
2014           st->print("strw  %s, [sp, #%d]\t# spill",
2015                     Matcher::regName[src_lo],
2016                     dst_offset);
2017         }
2018       }
2019     }
2020     return 4;
2021   case rc_float:
2022     if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
2023       if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
2024           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
2025           // 64 bit
2026         if (cbuf) {
2027           MacroAssembler _masm(cbuf);
2028           __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
2029                    as_FloatRegister(Matcher::_regEncode[src_lo]));
2030         } else if (st) {
2031           st->print("fmovd  %s, %s\t# shuffle",
2032                     Matcher::regName[dst_lo],
2033                     Matcher::regName[src_lo]);
2034         }
2035       } else {
2036         // 32 bit
2037         if (cbuf) {
2038           MacroAssembler _masm(cbuf);
2039           __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
2040                    as_FloatRegister(Matcher::_regEncode[src_lo]));
2041         } else if (st) {
2042           st->print("fmovs  %s, %s\t# shuffle",
2043                     Matcher::regName[dst_lo],
2044                     Matcher::regName[src_lo]);
2045         }
2046       }
2047     } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
2048       if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
2049           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
2050           // 64 bit
2051         if (cbuf) {
2052           MacroAssembler _masm(cbuf);
2053           __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2054                    as_FloatRegister(Matcher::_regEncode[src_lo]));
2055         } else if (st) {
2056           st->print("fmovd  %s, %s\t# shuffle",
2057                     Matcher::regName[dst_lo],
2058                     Matcher::regName[src_lo]);
2059         }
2060       } else {
2061         // 32 bit
2062         if (cbuf) {
2063           MacroAssembler _masm(cbuf);
2064           __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2065                    as_FloatRegister(Matcher::_regEncode[src_lo]));
2066         } else if (st) {
2067           st->print("fmovs  %s, %s\t# shuffle",
2068                     Matcher::regName[dst_lo],
2069                     Matcher::regName[src_lo]);
2070         }
2071       }
2072     } else {                    // fpr --> stack spill
2073       assert(dst_lo_rc == rc_stack, "spill to bad register class");
2074       int dst_offset = ra_->reg2offset(dst_lo);
2075       if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
2076           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
2077           // 64 bit
2078         if (cbuf) {
2079           MacroAssembler _masm(cbuf);
2080           __ strd(as_FloatRegister(Matcher::_regEncode[src_lo]),
2081                  Address(sp, dst_offset));
2082         } else if (st) {
2083           st->print("strd  %s, [sp, #%d]\t# spill",
2084                     Matcher::regName[src_lo],
2085                     dst_offset);
2086         }
2087       } else {
2088         // 32 bit
2089         if (cbuf) {
2090           MacroAssembler _masm(cbuf);
2091           __ strs(as_FloatRegister(Matcher::_regEncode[src_lo]),
2092                  Address(sp, dst_offset));
2093         } else if (st) {
2094           st->print("strs  %s, [sp, #%d]\t# spill",
2095                     Matcher::regName[src_lo],
2096                     dst_offset);
2097         }
2098       }
2099     }
2100     return 4;
2101   case rc_stack:
2102     int src_offset = ra_->reg2offset(src_lo);
2103     if (dst_lo_rc == rc_int) {  // stack --> gpr load
2104       if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
2105           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
2106           // 64 bit
2107         if (cbuf) {
2108           MacroAssembler _masm(cbuf);
2109           __ ldr(as_Register(Matcher::_regEncode[dst_lo]),
2110                  Address(sp, src_offset));
2111         } else if (st) {
2112           st->print("ldr  %s, [sp, %d]\t# restore",
2113                     Matcher::regName[dst_lo],
2114                     src_offset);
2115         }
2116       } else {
2117         // 32 bit
2118         if (cbuf) {
2119           MacroAssembler _masm(cbuf);
2120           __ ldrw(as_Register(Matcher::_regEncode[dst_lo]),
2121                   Address(sp, src_offset));
2122         } else if (st) {
2123           st->print("ldr  %s, [sp, %d]\t# restore",
2124                     Matcher::regName[dst_lo],
2125                    src_offset);
2126         }
2127       }
2128       return 4;
2129     } else if (dst_lo_rc == rc_float) { // stack --> fpr load
2130       if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
2131           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
2132           // 64 bit
2133         if (cbuf) {
2134           MacroAssembler _masm(cbuf);
2135           __ ldrd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2136                  Address(sp, src_offset));
2137         } else if (st) {
2138           st->print("ldrd  %s, [sp, %d]\t# restore",
2139                     Matcher::regName[dst_lo],
2140                     src_offset);
2141         }
2142       } else {
2143         // 32 bit
2144         if (cbuf) {
2145           MacroAssembler _masm(cbuf);
2146           __ ldrs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2147                   Address(sp, src_offset));
2148         } else if (st) {
2149           st->print("ldrs  %s, [sp, %d]\t# restore",
2150                     Matcher::regName[dst_lo],
2151                    src_offset);
2152         }
2153       }
2154       return 4;
2155     } else {                    // stack --> stack copy
2156       assert(dst_lo_rc == rc_stack, "spill to bad register class");
2157       int dst_offset = ra_->reg2offset(dst_lo);
2158       if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
2159           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
2160           // 64 bit
2161         if (cbuf) {
2162           MacroAssembler _masm(cbuf);
2163           __ ldr(rscratch1, Address(sp, src_offset));
2164           __ str(rscratch1, Address(sp, dst_offset));
2165         } else if (st) {
2166           st->print("ldr  rscratch1, [sp, %d]\t# mem-mem spill",
2167                     src_offset);
2168           st->print("\n\t");
2169           st->print("str  rscratch1, [sp, %d]",
2170                     dst_offset);
2171         }
2172       } else {
2173         // 32 bit
2174         if (cbuf) {
2175           MacroAssembler _masm(cbuf);
2176           __ ldrw(rscratch1, Address(sp, src_offset));
2177           __ strw(rscratch1, Address(sp, dst_offset));
2178         } else if (st) {
2179           st->print("ldrw  rscratch1, [sp, %d]\t# mem-mem spill",
2180                     src_offset);
2181           st->print("\n\t");
2182           st->print("strw  rscratch1, [sp, %d]",
2183                     dst_offset);
2184         }
2185       }
2186       return 8;
2187     }
2188   }
2189 
2190   assert(false," bad rc_class for spill ");
2191   Unimplemented();
2192   return 0;
2193 
2194 }
2195 
2196 #ifndef PRODUCT
2197 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
2198   if (!ra_)
2199     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
2200   else
2201     implementation(NULL, ra_, false, st);
2202 }
2203 #endif
2204 
2205 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
2206   implementation(&cbuf, ra_, false, NULL);
2207 }
2208 
2209 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
2210   return implementation(NULL, ra_, true, NULL);
2211 }
2212 
2213 //=============================================================================
2214 
2215 #ifndef PRODUCT
2216 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
2217   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
2218   int reg = ra_->get_reg_first(this);
2219   st->print("add %s, rsp, #%d]\t# box lock",
2220             Matcher::regName[reg], offset);
2221 }
2222 #endif
2223 
// Materialize the stack address of the box-lock slot into the result
// register: reg = sp + offset.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    // NOTE(review): offsets too large for an add immediate are not
    // handled here; this relies on frames staying small enough — confirm.
    ShouldNotReachHere();
  }
}
2236 
2237 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
2238   // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
2239   return 4;
2240 }
2241 
2242 //=============================================================================
2243 
2244 #ifndef PRODUCT
2245 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
2246 {
2247   st->print_cr("# MachUEPNode");
2248   if (UseCompressedClassPointers) {
2249     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
2250     if (Universe::narrow_klass_shift() != 0) {
2251       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
2252     }
2253   } else {
2254    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
2255   }
2256   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
2257   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
2258 }
2259 #endif
2260 
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  // Compare the receiver's klass (loaded via j_rarg0) with the expected
  // klass in rscratch2, using rscratch1 as a temporary.
  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  // Klass mismatch: dispatch to the shared inline-cache miss stub.
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
2274 
2275 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
2276 {
2277   return MachNode::size(ra_);
2278 }
2279 
2280 // REQUIRED EMIT CODE
2281 
2282 //=============================================================================
2283 
2284 // Emit exception handler code.
// Emits the exception-handler stub into the stub section and returns
// its offset, or 0 if the code buffer could not be expanded.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_exception_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2301 
2302 // Emit deopt handler code.
// Emits the deoptimization-handler stub and returns its offset, or 0
// if the code buffer could not be expanded.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_deopt_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();

  // Put the address of this deopt point into lr so the deopt blob can
  // identify which compiled pc is being deoptimized.
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2320 
2321 // REQUIRED MATCHER CODE
2322 
2323 //=============================================================================
2324 
2325 const bool Matcher::match_rule_supported(int opcode) {
2326 
2327   // TODO
2328   // identify extra cases that we might want to provide match rules for
2329   // e.g. Op_StrEquals and other intrinsics
2330   if (!has_match_rule(opcode)) {
2331     return false;
2332   }
2333 
2334   return true;  // Per default match rules are supported.
2335 }
2336 
2337 int Matcher::regnum_to_fpu_offset(int regnum)
2338 {
2339   Unimplemented();
2340   return 0;
2341 }
2342 
2343 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset)
2344 {
2345   Unimplemented();
2346   return false;
2347 }
2348 
2349 const bool Matcher::isSimpleConstant64(jlong value) {
2350   // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
2351   // Probably always true, even if a temp register is required.
2352   return true;
2353 }
2354 
2355 // true just means we have fast l2f conversion
2356 const bool Matcher::convL2FSupported(void) {
2357   return true;
2358 }
2359 
2360 // Vector width in bytes.
2361 const int Matcher::vector_width_in_bytes(BasicType bt) {
2362   // TODO fixme
2363   return 0;
2364 }
2365 
2366 // Limits on vector size (number of elements) loaded into vector.
2367 const int Matcher::max_vector_size(const BasicType bt) {
2368   return vector_width_in_bytes(bt)/type2aelembytes(bt);
2369 }
2370 const int Matcher::min_vector_size(const BasicType bt) {
2371   int max_size = max_vector_size(bt);
2372   // Min size which can be loaded into vector is 4 bytes.
2373   int size = (type2aelembytes(bt) == 1) ? 4 : 2;
2374   return MIN2(size,max_size);
2375 }
2376 
2377 // Vector ideal reg.
2378 const int Matcher::vector_ideal_reg(int len) {
2379   // TODO fixme
2380   return Op_RegD;
2381 }
2382 
2383 // Only lowest bits of xmm reg are used for vector shift count.
2384 const int Matcher::vector_shift_count_ideal_reg(int size) {
2385   // TODO fixme
2386   return Op_RegL;
2387 }
2388 
2389 // AES support not yet implemented
2390 const bool Matcher::pass_original_key_for_aes() {
2391   return false;
2392 }
2393 
2394 // x86 supports misaligned vectors store/load.
2395 const bool Matcher::misaligned_vectors_ok() {
2396   // TODO fixme
2397   // return !AlignVector; // can be changed by flag
2398   return false;
2399 }
2400 
// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Threshold size for cleararray: arrays at most this large are
// cleared with inline stores rather than a runtime call.
const int Matcher::init_array_short_size = 18 * BytesPerLong;
2406 
2407 // Use conditional move (CMOVL)
2408 const int Matcher::long_cmove_cost() {
2409   // long cmoves are no more expensive than int cmoves
2410   return 0;
2411 }
2412 
2413 const int Matcher::float_cmove_cost() {
2414   // float cmoves are no more expensive than int cmoves
2415   return 0;
2416 }
2417 
// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?  True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// (false here: no explicit masking is inserted by the matcher)
const bool Matcher::need_masked_shift_count = false;
2429 
2430 // This affects two different things:
2431 //  - how Decode nodes are matched
2432 //  - how ImplicitNullCheck opportunities are recognized
2433 // If true, the matcher will try to remove all Decodes and match them
2434 // (as operands) into nodes. NullChecks are not prepared to deal with
2435 // Decodes by final_graph_reshaping().
2436 // If false, final_graph_reshaping() forces the decode behind the Cmp
2437 // for a NullCheck. The matcher matches the Decode node into a register.
2438 // Implicit_null_check optimization moves the Decode along with the
2439 // memory operation back up before the NullCheck.
2440 bool Matcher::narrow_oop_use_complex_address() {
2441   return Universe::narrow_oop_shift() == 0;
2442 }
2443 
2444 bool Matcher::narrow_klass_use_complex_address() {
2445 // TODO
2446 // decide whether we need to set this to true
2447   return false;
2448 }
2449 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
// (true here: no split is performed on this target)
const bool Matcher::misaligned_doubles_ok = true;
2462 
2463 // No-op on amd64
2464 void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
2465   Unimplemented();
2466 }
2467 
// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?
// (true: deopt stack slots hold doubles for float values)
bool Matcher::float_in_double() { return true; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
2481 
2482 // Return whether or not this register is ever used as an argument.
2483 // This function is used on startup to build the trampoline stubs in
2484 // generateOptoStub.  Registers not mentioned will be killed by the VM
2485 // call in the trampoline, and arguments in those registers not be
2486 // available to the callee.
2487 bool Matcher::can_be_java_arg(int reg)
2488 {
2489   return
2490     reg ==  R0_num || reg == R0_H_num ||
2491     reg ==  R1_num || reg == R1_H_num ||
2492     reg ==  R2_num || reg == R2_H_num ||
2493     reg ==  R3_num || reg == R3_H_num ||
2494     reg ==  R4_num || reg == R4_H_num ||
2495     reg ==  R5_num || reg == R5_H_num ||
2496     reg ==  R6_num || reg == R6_H_num ||
2497     reg ==  R7_num || reg == R7_H_num ||
2498     reg ==  V0_num || reg == V0_H_num ||
2499     reg ==  V1_num || reg == V1_H_num ||
2500     reg ==  V2_num || reg == V2_H_num ||
2501     reg ==  V3_num || reg == V3_H_num ||
2502     reg ==  V4_num || reg == V4_H_num ||
2503     reg ==  V5_num || reg == V5_H_num ||
2504     reg ==  V6_num || reg == V6_H_num ||
2505     reg ==  V7_num || reg == V7_H_num;
2506 }
2507 
2508 bool Matcher::is_spillable_arg(int reg)
2509 {
2510   return can_be_java_arg(reg);
2511 }
2512 
2513 bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
2514   return false;
2515 }
2516 
2517 RegMask Matcher::divI_proj_mask() {
2518   ShouldNotReachHere();
2519   return RegMask();
2520 }
2521 
2522 // Register for MODI projection of divmodI.
2523 RegMask Matcher::modI_proj_mask() {
2524   ShouldNotReachHere();
2525   return RegMask();
2526 }
2527 
2528 // Register for DIVL projection of divmodL.
2529 RegMask Matcher::divL_proj_mask() {
2530   ShouldNotReachHere();
2531   return RegMask();
2532 }
2533 
2534 // Register for MODL projection of divmodL.
2535 RegMask Matcher::modL_proj_mask() {
2536   ShouldNotReachHere();
2537   return RegMask();
2538 }
2539 
2540 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
2541   return RegMask();
2542 }
2543 
2544 // helper for encoding java_to_runtime calls on sim
2545 //
2546 // this is needed to compute the extra arguments required when
2547 // planting a call to the simulator blrt instruction. the TypeFunc
2548 // can be queried to identify the counts for integral, and floating
2549 // arguments and the return type
2550 
2551 static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
2552 {
2553   int gps = 0;
2554   int fps = 0;
2555   const TypeTuple *domain = tf->domain();
2556   int max = domain->cnt();
2557   for (int i = TypeFunc::Parms; i < max; i++) {
2558     const Type *t = domain->field_at(i);
2559     switch(t->basic_type()) {
2560     case T_FLOAT:
2561     case T_DOUBLE:
2562       fps++;
2563     default:
2564       gps++;
2565     }
2566   }
2567   gpcnt = gps;
2568   fpcnt = fps;
2569   BasicType rt = tf->return_type();
2570   switch (rt) {
2571   case T_VOID:
2572     rtype = MacroAssembler::ret_type_void;
2573     break;
2574   default:
2575     rtype = MacroAssembler::ret_type_integral;
2576     break;
2577   case T_FLOAT:
2578     rtype = MacroAssembler::ret_type_float;
2579     break;
2580   case T_DOUBLE:
2581     rtype = MacroAssembler::ret_type_double;
2582     break;
2583   }
2584 }
2585 
// Emit a volatile access: the guarantees reject any index, scale or
// displacement, so only a bare [base] addressing mode reaches INSN.
// (Presumably used with acquire/release-style instructions that only
// accept a base register — verify at the use sites.)
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }

// Member-function-pointer types for the loadStore helpers below.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
2597 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  static void loadStore(MacroAssembler masm, mem_insn insn,
                         Register reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDOFFSETI2L:
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDOFFSETI2LN:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    // index == -1 means no index register: plain base+displacement.
    if (index == -1) {
      (masm.*insn)(reg, Address(base, disp));
    } else {
      if (disp == 0) {
        (masm.*insn)(reg, Address(base, as_Register(index), scale));
      } else {
        // Base + index + displacement cannot be encoded in one AArch64
        // addressing mode: fold base+disp into rscratch1 first.
        masm.lea(rscratch1, Address(base, disp));
        (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
      }
    }
  }
2632 
  // Float-register variant of loadStore above; see the comments there
  // for the opcode/scale kludge and the rscratch1 address folding.
  static void loadStore(MacroAssembler masm, mem_float_insn insn,
                         FloatRegister reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDOFFSETI2L:
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDOFFSETI2LN:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

     if (index == -1) {
      (masm.*insn)(reg, Address(base, disp));
    } else {
      if (disp == 0) {
        (masm.*insn)(reg, Address(base, as_Register(index), scale));
      } else {
        masm.lea(rscratch1, Address(base, disp));
        (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
      }
    }
  }
2661 
2662 %}
2663 
2664 
2665 
2666 //----------ENCODING BLOCK-----------------------------------------------------
2667 // This block specifies the encoding classes used by the compiler to
2668 // output byte streams.  Encoding classes are parameterized macros
2669 // used by Machine Instruction Nodes in order to generate the bit
2670 // encoding of the instruction.  Operands specify their base encoding
2671 // interface with the interface keyword.  There are currently
2672 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
2673 // COND_INTER.  REG_INTER causes an operand to generate a function
2674 // which returns its register number when queried.  CONST_INTER causes
2675 // an operand to generate a function which returns the value of the
2676 // constant when queried.  MEMORY_INTER causes an operand to generate
2677 // four functions which return the Base Register, the Index Register,
2678 // the Scale Value, and the Offset Value of the operand when queried.
2679 // COND_INTER causes an operand to generate six functions which return
2680 // the encoding code (ie - encoding bits for the instruction)
2681 // associated with each basic boolean condition for a conditional
2682 // instruction.
2683 //
2684 // Instructions specify two basic values for encoding.  Again, a
2685 // function is available to check if the constant displacement is an
2686 // oop. They use the ins_encode keyword to specify their encoding
2687 // classes (which must be a sequence of enc_class names, and their
2688 // parameters, specified in the encoding block), and they use the
2689 // opcode keyword to specify, in order, their primary, secondary, and
2690 // tertiary opcode.  Only the opcode sections which a particular
2691 // instruction needs for encoding need to be specified.
2692 encode %{
2693   // Build emit functions for each basic byte or larger field in the
2694   // intel encoding scheme (opcode, rm, sib, immediate), and call them
2695   // from C++ code in the enc_class source block.  Emit functions will
2696   // live in the main source block for now.  In future, we can
2697   // generalize this by adding a syntax that specifies the sizes of
2698   // fields in an order, so that the adlc can build the emit functions
2699   // automagically
2700 
  // Catch-all encoding: emits a trap/placeholder so that any instruct
  // pattern without a real encoding fails loudly at runtime.
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
2706 
  // BEGIN Non-volatile memory access

  // Each encoding below constructs a MacroAssembler over cbuf and
  // delegates to loadStore() above, which resolves the matcher's
  // memory operand (base / index / scale / disp) into an Address and
  // emits the named instruction.
  //
  // NOTE(review): some enc_class names (ldrb, ldrh, ldrw) are defined
  // twice, differing only in the dst operand class (iRegI vs iRegL) --
  // presumably ADLC disambiguates on operand types; confirm.

  // Load byte, sign-extended into a 32-bit register.
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load byte, sign-extended into a 64-bit register.
  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load byte, zero-extended (int result).
  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load byte, zero-extended (long result).
  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load halfword, sign-extended into a 32-bit register.
  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load halfword, sign-extended into a 64-bit register.
  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load halfword, zero-extended (int result).
  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load halfword, zero-extended (long result).
  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 32-bit word (int result).
  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 32-bit word, zero-extended (long result).
  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 32-bit word, sign-extended to 64 bits.
  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 64-bit doubleword.
  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load single-precision float into an FP register.
  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load double-precision float into an FP register.
  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2792 
  // Store byte from a register.
  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store zero byte (uses the zero register, no source operand).
  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store halfword from a register.
  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store zero halfword.
  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store 32-bit word from a register.
  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store zero 32-bit word.
  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store 64-bit doubleword from a register.
  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (SP is not a general-purpose operand of str), so copy SP into
    // rscratch2 first and store that instead.
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store zero 64-bit doubleword.
  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store single-precision float from an FP register.
  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store double-precision float from an FP register.
  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // END Non-volatile memory access
2862 
  // volatile loads and stores
  //
  // These use the MOV_VOLATILE macro (defined elsewhere in this file,
  // not visible here) which presumably resolves the memory operand via
  // the scratch register and emits the named acquire/release
  // instruction (ldar*/stlr*) -- confirm against the macro definition.

  // store-release byte
  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  // store-release halfword
  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  // store-release word
  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}


  // load-acquire byte then sign-extend to 32 bits (ldarb has no
  // signed form, so the extension is a separate instruction).
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // load-acquire byte then sign-extend to 64 bits
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  // load-acquire byte, zero-extended (int result)
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // load-acquire byte, zero-extended (long result)
  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // load-acquire halfword then sign-extend to 32 bits
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // load-acquire halfword then sign-extend to 64 bits
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // load-acquire halfword, zero-extended (int result)
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // load-acquire halfword, zero-extended (long result)
  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // load-acquire word (int result)
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // load-acquire word (long result)
  // NOTE(review): same enc_class name as above with a different dst
  // operand class -- presumably ADLC disambiguates; confirm.
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // load-acquire doubleword
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // volatile float load: ldar* has no FP form, so load-acquire into
  // rscratch1 and move the bits across with fmov.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // volatile double load, via rscratch1 as above.
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // store-release doubleword
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (SP is not a valid stlr operand); copy it to rscratch2 first.
    if (src_reg == r31_sp) {
        MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // volatile float store: move the FP bits into rscratch2 first, then
  // store-release the integer register.  The inner braces scope this
  // _masm so it cannot clash with any declared inside MOV_VOLATILE.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // volatile double store, via rscratch2 as above.
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
2989 
2990   // synchronized read/update encodings
2991 
  // Load-acquire exclusive doubleword.  ldaxr only accepts a bare base
  // register, so any displacement/index is first folded into rscratch1
  // with lea.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // base + disp + scaled index needs two lea steps.
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
3020 
  // Store-release exclusive doubleword.  stlxr writes its success flag
  // (0 = stored) into rscratch1; the trailing cmpw sets the condition
  // flags so callers can branch or cset on EQ.  rscratch2 holds the
  // resolved address when the operand is not a bare base register.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // base + disp + scaled index needs two lea steps.
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    // Expose success (rscratch1 == 0) in the condition flags.
    __ cmpw(rscratch1, zr);
  %}
3050 
  // 64-bit compare-and-swap built from an LL/SC (ldxr/stlxr) retry
  // loop.  On exit the condition flags are EQ iff the swap succeeded
  // (the last flag-setting instruction is the cmp against oldval);
  // pair with aarch64_enc_cset_eq to materialize a boolean result.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    Register old_reg = as_Register($oldval$$reg);
    Register new_reg = as_Register($newval$$reg);
    Register base = as_Register($mem$$base);
    Register addr_reg;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    // Resolve the memory operand to a single register, since ldxr/stlxr
    // only address through a bare base register.
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        addr_reg = rscratch2;
      } else {
        // TODO
        // should we ever get anything other than this case?
        addr_reg = base;
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        addr_reg = rscratch2;
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        addr_reg = rscratch2;
      }
    }
    Label retry_load, done;
    __ bind(retry_load);
    __ ldxr(rscratch1, addr_reg);             // load-exclusive current value
    __ cmp(rscratch1, old_reg);               // mismatch => fail with NE flags
    __ br(Assembler::NE, done);
    __ stlxr(rscratch1, new_reg, addr_reg);   // try store; 0 in rscratch1 = ok
    __ cbnzw(rscratch1, retry_load);          // lost the reservation: retry
    __ bind(done);
  %}
3089 
  // 32-bit compare-and-swap; same LL/SC structure as aarch64_enc_cmpxchg
  // above but with word-sized ldxrw/stlxrw/cmpw.  Flags are EQ iff the
  // swap succeeded.
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    Register old_reg = as_Register($oldval$$reg);
    Register new_reg = as_Register($newval$$reg);
    Register base = as_Register($mem$$base);
    Register addr_reg;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    // Resolve the memory operand to a single register.
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        addr_reg = rscratch2;
      } else {
        // TODO
        // should we ever get anything other than this case?
        addr_reg = base;
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        addr_reg = rscratch2;
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        addr_reg = rscratch2;
      }
    }
    Label retry_load, done;
    __ bind(retry_load);
    __ ldxrw(rscratch1, addr_reg);            // load-exclusive current word
    __ cmpw(rscratch1, old_reg);              // mismatch => fail with NE flags
    __ br(Assembler::NE, done);
    __ stlxrw(rscratch1, new_reg, addr_reg);  // try store; 0 in rscratch1 = ok
    __ cbnzw(rscratch1, retry_load);          // lost the reservation: retry
    __ bind(done);
  %}
3128 
  // auxiliary used for CompareAndSwapX to set result register:
  // materializes the EQ flag (left by the cmpxchg encodings above)
  // as 0/1 in the result register.
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}
3135 
  // prefetch encodings

  // Prefetch for write (PSTL1KEEP hint).  Displacement+index operands
  // need the displacement folded into rscratch1 first, as with the
  // load/store encodings above.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
      // NOTE(review): purpose of this nop is unclear from here
      // (instruction-count/patching padding?) -- confirm before removing.
      __ nop();
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
3157 
  // Zero a word-aligned region: an unrolled str(zr) loop entered
  // Duff's-device style so the (cnt % unroll) leftover words are
  // handled by jumping into the middle of the unrolled body.
  enc_class aarch64_enc_clear_array_reg_reg(iRegL_R11 cnt, iRegP_R10 base) %{
    MacroAssembler _masm(&cbuf);
    Register cnt_reg = as_Register($cnt$$reg);
    Register base_reg = as_Register($base$$reg);
    // base is word aligned
    // cnt is count of words

    Label loop;
    Label entry;

//  Algorithm:
//
//    scratch1 = cnt & 7;
//    cnt -= scratch1;
//    p += scratch1;
//    switch (scratch1) {
//      do {
//        cnt -= 8;
//          p[-8] = 0;
//        case 7:
//          p[-7] = 0;
//        case 6:
//          p[-6] = 0;
//          // ...
//        case 1:
//          p[-1] = 0;
//        case 0:
//          p += 8;
//      } while (cnt);
//    }

    const int unroll = 8; // Number of str(zr) instructions we'll unroll

    __ andr(rscratch1, cnt_reg, unroll - 1);  // rscratch1 = cnt % unroll
    __ sub(cnt_reg, cnt_reg, rscratch1);      // cnt -= (cnt % unroll)
    // base_reg always points to the end of the region we're about to zero
    __ add(base_reg, base_reg, rscratch1, Assembler::LSL, exact_log2(wordSize));
    // Jump backwards from `entry` by one 4-byte instruction per
    // leftover word, skipping the stores we don't need on the first
    // pass through the unrolled body.
    __ adr(rscratch2, entry);
    __ sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 2);
    __ br(rscratch2);
    __ bind(loop);
    __ sub(cnt_reg, cnt_reg, unroll);
    for (int i = -unroll; i < 0; i++)
      __ str(zr, Address(base_reg, i * wordSize));
    __ bind(entry);
    __ add(base_reg, base_reg, unroll * wordSize);
    __ cbnz(cnt_reg, loop);
  %}
3206 
  /// mov encodings

  // Move a 32-bit immediate into a register; zero is special-cased as
  // a move from the zero register.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    MacroAssembler _masm(&cbuf);
    u_int32_t con = (u_int32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // Move a 64-bit immediate into a register; zero is special-cased.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    u_int64_t con = (u_int64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}
3230 
  // Move a pointer constant, choosing the emission by relocation type:
  // oops and metadata get relocatable moves; plain addresses below the
  // first page are emitted as immediates, others via adrp+add.
  // NULL and 1 have dedicated encodings below and must not reach here.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          // Page-aligned PC-relative materialization: adrp gives the
          // page, add supplies the in-page offset.
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}

  // Move the null pointer constant (0).
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Move the pointer constant 1.
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (u_int64_t)1);
  %}

  // Materialize the polling page address with a poll_type relocation;
  // the page is assumed to be page-aligned (offset must be 0).
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}

  // Materialize the card-table byte map base; also assumed page-aligned.
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, ExternalAddress(page), off);
    assert(off == 0, "assumed offset == 0");
  %}

  // Move a narrow (compressed) oop constant.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // Move the narrow-oop null constant (0).
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Move a narrow (compressed) klass constant.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
3317 
  // arithmetic encodings

  // 32-bit add/subtract of an immediate.  One enc_class serves both
  // patterns: subtract negates the constant ($primary distinguishes
  // them), then a negative constant is emitted as the opposite
  // instruction with the magnitude.
  // NOTE(review): `con = -con` on INT_MIN is signed overflow -- relies
  // on immIAddSub never matching that value; confirm.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // 64-bit add/subtract of an immediate; same scheme as above.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
3347 
  // Integer divide/modulo via MacroAssembler::corrected_idiv{l,q},
  // which presumably applies the Java-semantics corrections (e.g.
  // MIN_VALUE / -1) -- confirm against the MacroAssembler source.
  // The final bool appears to select remainder (true) vs quotient
  // (false), matching the divw/modw pairing below.
  enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 64-bit divide.
  // NOTE(review): operands are declared iRegI but the emission is the
  // 64-bit corrected_idivq -- enc_class operand classes look cosmetic
  // here; confirm the instruct patterns bind long operands.
  enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 32-bit remainder.
  enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}

  // 64-bit remainder (same iRegI-operand caveat as aarch64_enc_div).
  enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}
3379 
3380   // compare instruction encodings
3381 
3382   enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
3383     MacroAssembler _masm(&cbuf);
3384     Register reg1 = as_Register($src1$$reg);
3385     Register reg2 = as_Register($src2$$reg);
3386     __ cmpw(reg1, reg2);
3387   %}
3388 
3389   enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
3390     MacroAssembler _masm(&cbuf);
3391     Register reg = as_Register($src1$$reg);
3392     int32_t val = $src2$$constant;
3393     if (val >= 0) {
3394       __ subsw(zr, reg, val);
3395     } else {
3396       __ addsw(zr, reg, -val);
3397     }
3398   %}
3399 
3400   enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
3401     MacroAssembler _masm(&cbuf);
3402     Register reg1 = as_Register($src1$$reg);
3403     u_int32_t val = (u_int32_t)$src2$$constant;
3404     __ movw(rscratch1, val);
3405     __ cmpw(reg1, rscratch1);
3406   %}
3407 
3408   enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
3409     MacroAssembler _masm(&cbuf);
3410     Register reg1 = as_Register($src1$$reg);
3411     Register reg2 = as_Register($src2$$reg);
3412     __ cmp(reg1, reg2);
3413   %}
3414 
3415   enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
3416     MacroAssembler _masm(&cbuf);
3417     Register reg = as_Register($src1$$reg);
3418     int64_t val = $src2$$constant;
3419     if (val >= 0) {
3420       __ subs(zr, reg, val);
3421     } else if (val != -val) {
3422       __ adds(zr, reg, -val);
3423     } else {
3424     // aargh, Long.MIN_VALUE is a special case
3425       __ orr(rscratch1, zr, (u_int64_t)val);
3426       __ subs(zr, reg, rscratch1);
3427     }
3428   %}
3429 
3430   enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
3431     MacroAssembler _masm(&cbuf);
3432     Register reg1 = as_Register($src1$$reg);
3433     u_int64_t val = (u_int64_t)$src2$$constant;
3434     __ mov(rscratch1, val);
3435     __ cmp(reg1, rscratch1);
3436   %}
3437 
3438   enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
3439     MacroAssembler _masm(&cbuf);
3440     Register reg1 = as_Register($src1$$reg);
3441     Register reg2 = as_Register($src2$$reg);
3442     __ cmp(reg1, reg2);
3443   %}
3444 
3445   enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
3446     MacroAssembler _masm(&cbuf);
3447     Register reg1 = as_Register($src1$$reg);
3448     Register reg2 = as_Register($src2$$reg);
3449     __ cmpw(reg1, reg2);
3450   %}
3451 
3452   enc_class aarch64_enc_testp(iRegP src) %{
3453     MacroAssembler _masm(&cbuf);
3454     Register reg = as_Register($src$$reg);
3455     __ cmp(reg, zr);
3456   %}
3457 
3458   enc_class aarch64_enc_testn(iRegN src) %{
3459     MacroAssembler _masm(&cbuf);
3460     Register reg = as_Register($src$$reg);
3461     __ cmpw(reg, zr);
3462   %}
3463 
  // Unconditional branch to a label.
  enc_class aarch64_enc_b(label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // Conditional branch; the condition code comes from the cmpOp
  // operand's $cmpcode encoding.
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Unsigned conditional branch.  The body is identical to br_con --
  // presumably the cmpOpU operand supplies the unsigned condition
  // codes via $cmpcode; confirm against the operand definitions.
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
3481 
  // Slow-path (secondary supers) subtype check.
  // With a NULL success label, check_klass_subtype_slow_path falls
  // through on success and branches to 'miss' on failure;
  // set_cond_codes=true makes it set the condition flags to reflect
  // the outcome.  In the $primary variant, result is cleared on the
  // fall-through (success) path; a miss branches directly to the
  // label and skips the clearing.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
3499 
  // Compiled Java static call (also used for optimized virtual calls
  // and calls to runtime wrappers).  The relocation type selects how
  // the call site can later be patched.
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else if (_optimized_virtual) {
      // Devirtualized call: relocated as an optimized virtual call.
      __ trampoline_call(Address(addr, relocInfo::opt_virtual_call_type), &cbuf);
    } else {
      __ trampoline_call(Address(addr, relocInfo::static_call_type), &cbuf);
    }

    if (_method) {
      // Emit stub for static call so the site can be re-bound to the
      // interpreter entry.
      CompiledStaticCall::emit_to_interp_stub(cbuf);
    }
  %}
3518 
3519   enc_class aarch64_enc_java_handle_call(method meth) %{
3520     MacroAssembler _masm(&cbuf);
3521     relocInfo::relocType reloc;
3522 
3523     // RFP is preserved across all calls, even compiled calls.
3524     // Use it to preserve SP.
3525     __ mov(rfp, sp);
3526 
3527     const int start_offset = __ offset();
3528     address addr = (address)$meth$$method;
3529     if (!_method) {
3530       // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
3531       __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
3532     } else if (_optimized_virtual) {
3533       __ trampoline_call(Address(addr, relocInfo::opt_virtual_call_type), &cbuf);
3534     } else {
3535       __ trampoline_call(Address(addr, relocInfo::static_call_type), &cbuf);
3536     }
3537 
3538     if (_method) {
3539       // Emit stub for static call
3540       CompiledStaticCall::emit_to_interp_stub(cbuf);
3541     }
3542 
3543     // now restore sp
3544     __ mov(sp, rfp);
3545   %}
3546 
  // Compiled Java dynamic (virtual/interface) call through an inline cache.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    MacroAssembler _masm(&cbuf);
    __ ic_call((address)$meth$$method);
  %}
3551 
  // Epilogue emitted after a Java call returns.  Only does work when
  // stack verification is requested.
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find magic cookie on stack.
      // Not implemented on AArch64 yet: stop if anyone enables the flag.
      __ call_Unimplemented();
    }
  %}
3559 
  // Call from compiled Java code out to a VM runtime routine.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target lives in the code cache: a trampoline call reaches it.
      __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
    } else {
      // Arbitrary C function: call through blrt with argument
      // counts/types derived from this call's TypeFunc.
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaThread::pd_last_frame(): push a
      // two-word pseudo-frame of (zero, return address).
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      // Pop the breadcrumb frame.
      __ add(sp, sp, 2 * wordSize);
    }
  %}
3586 
  // Jump to the rethrow stub to re-dispatch an exception.
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}

  // Method return: branch to the link register.
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}

  // Tail call: indirect jump to the target address register.
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}

  // Tail jump used when forwarding an exception to the caller's handler.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
3612 
3613   enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
3614     MacroAssembler _masm(&cbuf);
3615     Register oop = as_Register($object$$reg);
3616     Register box = as_Register($box$$reg);
3617     Register disp_hdr = as_Register($tmp$$reg);
3618     Register tmp = as_Register($tmp2$$reg);
3619     Label cont;
3620     Label object_has_monitor;
3621     Label cas_failed;
3622 
3623     assert_different_registers(oop, box, tmp, disp_hdr);
3624 
3625     // Load markOop from object into displaced_header.
3626     __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
3627 
3628     // Always do locking in runtime.
3629     if (EmitSync & 0x01) {
3630       __ cmp(oop, zr);
3631       return;
3632     }
3633 
3634     if (UseBiasedLocking) {
3635       __ biased_locking_enter(disp_hdr, oop, box, tmp, true, cont);
3636     }
3637 
3638     // Handle existing monitor
3639     if (EmitSync & 0x02) {
3640       // we can use AArch64's bit test and branch here but
3641       // markoopDesc does not define a bit index just the bit value
3642       // so assert in case the bit pos changes
3643 #     define __monitor_value_log2 1
3644       assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
3645       __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
3646 #     undef __monitor_value_log2
3647     }
3648 
3649     // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
3650     __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);
3651 
3652     // Load Compare Value application register.
3653 
3654     // Initialize the box. (Must happen before we update the object mark!)
3655     __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
3656 
3657     // Compare object markOop with mark and if equal exchange scratch1
3658     // with object markOop.
3659     // Note that this is simply a CAS: it does not generate any
3660     // barriers.  These are separately generated by
3661     // membar_acquire_lock().
3662     {
3663       Label retry_load;
3664       __ bind(retry_load);
3665       __ ldxr(tmp, oop);
3666       __ cmp(tmp, disp_hdr);
3667       __ br(Assembler::NE, cas_failed);
3668       // use stlxr to ensure update is immediately visible
3669       __ stlxr(tmp, box, oop);
3670       __ cbzw(tmp, cont);
3671       __ b(retry_load);
3672     }
3673 
3674     // Formerly:
3675     // __ cmpxchgptr(/*oldv=*/disp_hdr,
3676     //               /*newv=*/box,
3677     //               /*addr=*/oop,
3678     //               /*tmp=*/tmp,
3679     //               cont,
3680     //               /*fail*/NULL);
3681 
3682     assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3683 
3684     // If the compare-and-exchange succeeded, then we found an unlocked
3685     // object, will have now locked it will continue at label cont
3686 
3687     __ bind(cas_failed);
3688     // We did not see an unlocked object so try the fast recursive case.
3689 
3690     // Check if the owner is self by comparing the value in the
3691     // markOop of object (disp_hdr) with the stack pointer.
3692     __ mov(rscratch1, sp);
3693     __ sub(disp_hdr, disp_hdr, rscratch1);
3694     __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
3695     // If condition is true we are cont and hence we can store 0 as the
3696     // displaced header in the box, which indicates that it is a recursive lock.
3697     __ ands(tmp/*==0?*/, disp_hdr, tmp);
3698     __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
3699 
3700     // Handle existing monitor.
3701     if ((EmitSync & 0x02) == 0) {
3702       __ b(cont);
3703 
3704       __ bind(object_has_monitor);
3705       // The object's monitor m is unlocked iff m->owner == NULL,
3706       // otherwise m->owner may contain a thread or a stack address.
3707       //
3708       // Try to CAS m->owner from NULL to current thread.
3709       __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
3710       __ mov(disp_hdr, zr);
3711 
3712       {
3713         Label retry_load, fail;
3714         __ bind(retry_load);
3715         __ ldxr(rscratch1, tmp);
3716         __ cmp(disp_hdr, rscratch1);
3717         __ br(Assembler::NE, fail);
3718         // use stlxr to ensure update is immediately visible
3719         __ stlxr(rscratch1, rthread, tmp);
3720         __ cbnzw(rscratch1, retry_load);
3721         __ bind(fail);
3722       }
3723 
3724       // Label next;
3725       // __ cmpxchgptr(/*oldv=*/disp_hdr,
3726       //               /*newv=*/rthread,
3727       //               /*addr=*/tmp,
3728       //               /*tmp=*/rscratch1,
3729       //               /*succeed*/next,
3730       //               /*fail*/NULL);
3731       // __ bind(next);
3732 
3733       // store a non-null value into the box.
3734       __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));
3735 
3736       // PPC port checks the following invariants
3737       // #ifdef ASSERT
3738       // bne(flag, cont);
3739       // We have acquired the monitor, check some invariants.
3740       // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
3741       // Invariant 1: _recursions should be 0.
3742       // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
3743       // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
3744       //                        "monitor->_recursions should be 0", -1);
3745       // Invariant 2: OwnerIsThread shouldn't be 0.
3746       // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
3747       //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
3748       //                           "monitor->OwnerIsThread shouldn't be 0", -1);
3749       // #endif
3750     }
3751 
3752     __ bind(cont);
3753     // flag == EQ indicates success
3754     // flag == NE indicates failure
3755 
3756   %}
3757 
  // TODO
  // reimplement this with custom cmpxchgptr code
  // which avoids some of the unnecessary branching
  //
  // Fast-path monitor exit; mirror image of aarch64_enc_fast_lock.
  //   object    - the oop to unlock
  //   box       - the on-stack BasicLock holding the displaced header
  //   tmp, tmp2 - scratch registers
  // On exit, flags == EQ indicates success and flags == NE indicates
  // failure; the caller branches to the runtime slow path on NE.
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      __ cmp(oop, zr); // Oop can't be 0 here => always false.
      return;
    }

    if (UseBiasedLocking) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    // NOTE(review): the mark word is loaded into tmp (used later in the
    // object_has_monitor path) but the monitor bit is tested on
    // disp_hdr, the displaced header -- verify this is intentional.
    if ((EmitSync & 0x02) == 0) {
      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
    }

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      {
        Label retry_load;
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        __ cbzw(tmp, cont);
        __ b(retry_load);
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
      __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
      __ cmp(rscratch1, zr);
      __ br(Assembler::NE, cont);

      // Monitor can only be released if no thread is waiting on it:
      // check that EntryList and cxq are both empty.
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
      __ cmp(rscratch1, zr);
      __ cbnz(rscratch1, cont);
      // need a release store here
      __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ stlr(rscratch1, tmp); // rscratch1 is zero
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
3850 
3851 %}
3852 
3853 //----------FRAME--------------------------------------------------------------
3854 // Definition of frame structure and management information.
3855 //
3856 //  S T A C K   L A Y O U T    Allocators stack-slot number
3857 //                             |   (to get allocators register number
3858 //  G  Owned by    |        |  v    add OptoReg::stack0())
3859 //  r   CALLER     |        |
3860 //  o     |        +--------+      pad to even-align allocators stack-slot
3861 //  w     V        |  pad0  |        numbers; owned by CALLER
3862 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
3863 //  h     ^        |   in   |  5
3864 //        |        |  args  |  4   Holes in incoming args owned by SELF
3865 //  |     |        |        |  3
3866 //  |     |        +--------+
3867 //  V     |        | old out|      Empty on Intel, window on Sparc
3868 //        |    old |preserve|      Must be even aligned.
3869 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
3870 //        |        |   in   |  3   area for Intel ret address
3871 //     Owned by    |preserve|      Empty on Sparc.
3872 //       SELF      +--------+
3873 //        |        |  pad2  |  2   pad to align old SP
3874 //        |        +--------+  1
3875 //        |        | locks  |  0
3876 //        |        +--------+----> OptoReg::stack0(), even aligned
3877 //        |        |  pad1  | 11   pad to align new SP
3878 //        |        +--------+
3879 //        |        |        | 10
3880 //        |        | spills |  9   spills
3881 //        V        |        |  8   (pad0 slot for callee)
3882 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
3883 //        ^        |  out   |  7
3884 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
3885 //     Owned by    +--------+
3886 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
3887 //        |    new |preserve|      Must be even-aligned.
3888 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
3889 //        |        |        |
3890 //
3891 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
3892 //         known from SELF's arguments and the Java calling convention.
3893 //         Region 6-7 is determined per call site.
3894 // Note 2: If the calling convention leaves holes in the incoming argument
3895 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
3897 //         incoming area, as the Java calling convention is completely under
3898 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
3900 //         varargs C calling conventions.
3901 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
3902 //         even aligned with pad0 as needed.
3903 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
3904 //           (the latter is true on Intel but is it false on AArch64?)
3905 //         region 6-11 is even aligned; it may be padded out more so that
3906 //         the region from SP to FP meets the minimum stack alignment.
3907 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3908 //         alignment.  Region 11, pad1, may be dynamically extended so that
3909 //         SP meets the minimum alignment.
3910 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              round_to((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low half of the return register pair, indexed by ideal reg type.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High half; OptoReg::Bad for 32-bit values with no high half.
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
4014 
4015 //----------ATTRIBUTES---------------------------------------------------------
4016 //----------Operand Attributes-------------------------------------------------
// These attributes are required by the ADLC; the values given here are
// the defaults applied to every operand/instruction unless overridden.
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
4032 
4033 //----------OPERANDS-----------------------------------------------------------
4034 // Operand definitions must precede instruction definitions for correct parsing
4035 // in the ADLC because operands constitute user defined types which are used in
4036 // instruction definitions.
4037 
4038 //----------Simple Operands----------------------------------------------------
4039 
4040 // Integer operands 32 bit
4041 // 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant no greater than 4 (note: negative values also match)
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the 32 bit constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the 32 bit constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the 32 bit constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the 32 bit constant 255 (low byte mask)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the 32 bit constant 65535 (low halfword mask)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4193 
// NOTE(review): despite the immL_ prefix, these two operands use
// n->get_int() and match(ConI), i.e. they match *int* constants --
// presumably because shift counts and small masks applied to long
// values are int-typed nodes.  Confirm this is intentional.
operand immL_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immL_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4213 
// the 64 bit constant 65535 (low halfword mask)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the 64 bit constant 0xFFFFFFFF (low word mask)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// long constant of the form 2^k - 1 (a contiguous low-order bit mask)
// with the top two bits clear
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// int constant of the form 2^k - 1 (a contiguous low-order bit mask)
// with the top two bits clear
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4255 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset (long constant) -- for base plus immediate loads
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset (long constant) for scaled or unscaled immediate loads and stores
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4330 
// 32 bit integer valid for add sub immediate (as determined by
// Assembler::operand_valid_for_add_sub_immediate)
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4352 
// Integer operands 64 bit
// 64 bit immediate
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the long constant equal to the byte offset of last_Java_pc within
// the JavaThread frame anchor (pc slot in the thread's anchor)

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4439 
// Pointer operands
// Pointer Immediate
operand immP()
%{
  // any pointer constant
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
// matches only the address of the VM's safepoint polling page
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate((jbyte*)n->get_ptr() ==
        ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4521 
// Float and Double operands
// Double Immediate
operand immD()
%{
  // any double constant
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  // bitwise comparison so -0.0d does not match
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: encodable as a packed FP immediate
// (per Assembler::operand_valid_for_float_immediate)
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  // any float constant
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  // bitwise comparison so -0.0f does not match
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: encodable as a packed FP immediate
// (per Assembler::operand_valid_for_float_immediate)
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4582 
// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  // any compressed-oop constant
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow (compressed) Klass pointer immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4613 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4647 
// Integer 64 bit Register not Special
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  // op_cost(0) added for consistency with the other NoSp register
  // operands (iRegINoSp, iRegPNoSp, iRegNNoSp), which all declare it
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4656 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4701 
// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4773 
// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4795 
// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4840 
4841 
// Narrow Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4874 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register V0 only
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register V1 only
operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register V2 only
operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register V3 only
operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4934 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
4974 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5016 
//----------Memory Operands----------------------------------------------------

// plain base-register addressing: [$reg]
// index(0xffffffff) is the ADLC convention for "no index register"
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// base + (long index << scale) + unsigned 12-bit int offset
operand indIndexScaledOffsetI(iRegP reg, iRegL lreg, immIScale scale, immIU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// base + (long index << scale) + unsigned 12-bit long offset
operand indIndexScaledOffsetL(iRegP reg, iRegL lreg, immIScale scale, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}
5060 
// base + (sign-extended int index << scale) + unsigned 12-bit long offset
operand indIndexScaledOffsetI2L(iRegP reg, iRegI ireg, immIScale scale, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg sxtw($scale), $off I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}

// base + (sign-extended int index << scale), no offset
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// base + (long index << scale), no offset
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// base + long index, no scale, no offset
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
5116 
// base + int immediate offset (range-checked by immIOffset)
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(INSN_COST);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// base + long immediate offset (range-checked by immLoffset)
// NOTE(review): op_cost is 0 here but INSN_COST in indOffI — confirm intended
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5144 
5145 
// Narrow-oop variants of the memory operands above. These match a
// DecodeN of the base and are only legal when the narrow oop shift is
// zero, i.e. when decoding does not require an actual shift.

operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexScaledOffsetIN(iRegN reg, iRegL lreg, immIScale scale, immIU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

operand indIndexScaledOffsetLN(iRegN reg, iRegL lreg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

operand indIndexScaledOffsetI2LN(iRegN reg, iRegI ireg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg sxtw($scale), $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}
5205 
// narrow base + (sign-extended int index << scale), no offset
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// narrow base + (long index << scale), no offset
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// narrow base + long index, no scale, no offset
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// narrow base + int immediate offset
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// narrow base + long immediate offset
operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5280 
5281 
5282 
// AArch64 opto stubs need to write to the pc slot in the thread anchor
// thread register + the constant offset of last_Java_pc (immL_pc_off)
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5297 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    // NOTE(review): "RSP" is x86-style naming; presumably 0x1e encodes
    // the stack pointer in this file's register numbering -- confirm
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
5372 
5373 // Operands for expressing Control Flow
5374 // NOTE: Label is a predefined operand which should not be redefined in
5375 //       the AD file. It is generically handled within the ADLC.
5376 
5377 //----------Conditional Branch Operands----------------------------------------
5378 // Comparison Op  - This is the operation of the comparison, and is limited to
5379 //                  the following set of codes:
5380 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5381 //
5382 // Other attributes of the comparison, such as unsignedness, are specified
5383 // by the comparison instruction that sets a condition code flags register.
5384 // That result is represented by a flags operand whose subtype is appropriate
5385 // to the unsignedness (etc.) of the comparison.
5386 //
5387 // Later, the instruction which matches both the Comparison Op (a Bool) and
5388 // the flags (produced by the Cmp) specifies the coding of the comparison op
5389 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5390 
// used for signed integral comparisons and fp comparisons
// the hex values are AArch64 condition-code encodings
// (eq=0x0, ne=0x1, lt=0xb, ge=0xa, le=0xd, gt=0xc, vs=0x6, vc=0x7)

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons
// uses the unsigned condition codes lo/hs/ls/hi in place of lt/ge/le/gt

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5428 
// Special operand allowing long args to int ops to be truncated for free

// Matches (ConvL2I reg) so a 32-bit instruction can consume the low
// half of the long register directly, eliding the explicit l2i move.
operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  // terminating ';' added for consistency with every other operand's
  // interface clause in this file
  interface(REG_INTER);
%}
5441 
5442 
//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.

// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address

opclass memory(indirect, indIndexScaledOffsetI,  indIndexScaledOffsetL, indIndexScaledOffsetI2L, indIndexScaled, indIndexScaledI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledOffsetIN,  indIndexScaledOffsetLN, indIndexScaledOffsetI2LN, indIndexScaledN, indIndexScaledI2LN, indIndexN, indOffIN, indOffLN);


// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but it's not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
5471 
5472 //----------PIPELINE-----------------------------------------------------------
5473 // Rules which define the behavior of the target architectures pipeline.
5474 // Integer ALU reg operation
5475 pipeline %{
5476 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions (TODO: verify)
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
5489 
5490 // We don't use an actual pipeline model so don't care about resources
5491 // or description. we do use pipeline classes to introduce fixed
5492 // latencies
5493 
//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

// INS01 means either issue slot; ALU means either ALU pipe
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Issue, two execute stages, write-back
pipe_desc(ISS, EX1, EX2, WR);
5509 
//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read); // shifted operand read early, at issue
  INS01  : ISS;
  ALU    : EX2;
%}
5541 
// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read); // shifted operand read early, at issue
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}
5566 
// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  // NOTE(review): dst is written in EX2 but the ALU resource is booked
  // in EX1 -- confirm this mismatch is intentional
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
5611 
5612 //------- Compare operation -------------------------------
5613 
5614 // Compare reg-reg
5615 // Eg.  CMP     x0, x1
5616 pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
5617 %{
5618   single_instruction;
5619 //  fixed_latency(16);
5620   cr     : EX2(write);
5621   op1    : EX1(read);
5622   op2    : EX1(read);
5623   INS01  : ISS;
5624   ALU    : EX2;
5625 %}
5626 
5627 // Compare reg-reg
5628 // Eg.  CMP     x0, #N
5629 pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
5630 %{
5631   single_instruction;
5632 //  fixed_latency(16);
5633   cr     : EX2(write);
5634   op1    : EX1(read);
5635   INS01  : ISS;
5636   ALU    : EX2;
5637 %}
5638 
5639 //------- Conditional instructions ------------------------
5640 
5641 // Conditional no operands
5642 // Eg.  CSINC   x0, zr, zr, <cond>
5643 pipe_class icond_none(iRegI dst, rFlagsReg cr)
5644 %{
5645   single_instruction;
5646   cr     : EX1(read);
5647   dst    : EX2(write);
5648   INS01  : ISS;
5649   ALU    : EX2;
5650 %}
5651 
5652 // Conditional 2 operand
5653 // EG.  CSEL    X0, X1, X2, <cond>
5654 pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
5655 %{
5656   single_instruction;
5657   cr     : EX1(read);
5658   src1   : EX1(read);
5659   src2   : EX1(read);
5660   dst    : EX2(write);
5661   INS01  : ISS;
5662   ALU    : EX2;
5663 %}
5664 
5665 // Conditional 2 operand
5666 // EG.  CSEL    X0, X1, X2, <cond>
5667 pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
5668 %{
5669   single_instruction;
5670   cr     : EX1(read);
5671   src    : EX1(read);
5672   dst    : EX2(write);
5673   INS01  : ISS;
5674   ALU    : EX2;
5675 %}
5676 
5677 //------- Multiply pipeline operations --------------------
5678 
5679 // Multiply reg-reg
5680 // Eg.  MUL     w0, w1, w2
5681 pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
5682 %{
5683   single_instruction;
5684   dst    : WR(write);
5685   src1   : ISS(read);
5686   src2   : ISS(read);
5687   INS01  : ISS;
5688   MAC    : WR;
5689 %}
5690 
5691 // Multiply accumulate
5692 // Eg.  MADD    w0, w1, w2, w3
5693 pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
5694 %{
5695   single_instruction;
5696   dst    : WR(write);
5697   src1   : ISS(read);
5698   src2   : ISS(read);
5699   src3   : ISS(read);
5700   INS01  : ISS;
5701   MAC    : WR;
5702 %}
5703 
5704 // Eg.  MUL     w0, w1, w2
5705 pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
5706 %{
5707   single_instruction;
5708   fixed_latency(3); // Maximum latency for 64 bit mul
5709   dst    : WR(write);
5710   src1   : ISS(read);
5711   src2   : ISS(read);
5712   INS01  : ISS;
5713   MAC    : WR;
5714 %}
5715 
5716 // Multiply accumulate
5717 // Eg.  MADD    w0, w1, w2, w3
5718 pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
5719 %{
5720   single_instruction;
5721   fixed_latency(3); // Maximum latency for 64 bit mul
5722   dst    : WR(write);
5723   src1   : ISS(read);
5724   src2   : ISS(read);
5725   src3   : ISS(read);
5726   INS01  : ISS;
5727   MAC    : WR;
5728 %}
5729 
5730 //------- Divide pipeline operations --------------------
5731 
5732 // Eg.  SDIV    w0, w1, w2
5733 pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
5734 %{
5735   single_instruction;
5736   fixed_latency(8); // Maximum latency for 32 bit divide
5737   dst    : WR(write);
5738   src1   : ISS(read);
5739   src2   : ISS(read);
5740   INS0   : ISS; // Can only dual issue as instruction 0
5741   DIV    : WR;
5742 %}
5743 
5744 // Eg.  SDIV    x0, x1, x2
5745 pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
5746 %{
5747   single_instruction;
5748   fixed_latency(16); // Maximum latency for 64 bit divide
5749   dst    : WR(write);
5750   src1   : ISS(read);
5751   src2   : ISS(read);
5752   INS0   : ISS; // Can only dual issue as instruction 0
5753   DIV    : WR;
5754 %}
5755 
5756 //------- Load pipeline operations ------------------------
5757 
5758 // Load - prefetch
5759 // Eg.  PFRM    <mem>
5760 pipe_class iload_prefetch(memory mem)
5761 %{
5762   single_instruction;
5763   mem    : ISS(read);
5764   INS01  : ISS;
5765   LDST   : WR;
5766 %}
5767 
5768 // Load - reg, mem
5769 // Eg.  LDR     x0, <mem>
5770 pipe_class iload_reg_mem(iRegI dst, memory mem)
5771 %{
5772   single_instruction;
5773   dst    : WR(write);
5774   mem    : ISS(read);
5775   INS01  : ISS;
5776   LDST   : WR;
5777 %}
5778 
5779 // Load - reg, reg
5780 // Eg.  LDR     x0, [sp, x1]
5781 pipe_class iload_reg_reg(iRegI dst, iRegI src)
5782 %{
5783   single_instruction;
5784   dst    : WR(write);
5785   src    : ISS(read);
5786   INS01  : ISS;
5787   LDST   : WR;
5788 %}
5789 
5790 //------- Store pipeline operations -----------------------
5791 
5792 // Store - zr, mem
5793 // Eg.  STR     zr, <mem>
5794 pipe_class istore_mem(memory mem)
5795 %{
5796   single_instruction;
5797   mem    : ISS(read);
5798   INS01  : ISS;
5799   LDST   : WR;
5800 %}
5801 
5802 // Store - reg, mem
5803 // Eg.  STR     x0, <mem>
5804 pipe_class istore_reg_mem(iRegI src, memory mem)
5805 %{
5806   single_instruction;
5807   mem    : ISS(read);
5808   src    : EX2(read);
5809   INS01  : ISS;
5810   LDST   : WR;
5811 %}
5812 
5813 // Store - reg, reg
5814 // Eg. STR      x0, [sp, x1]
5815 pipe_class istore_reg_reg(iRegI dst, iRegI src)
5816 %{
5817   single_instruction;
5818   dst    : ISS(read);
5819   src    : EX2(read);
5820   INS01  : ISS;
5821   LDST   : WR;
5822 %}
5823 
5824 //------- Store pipeline operations -----------------------
5825 
5826 // Branch
5827 pipe_class pipe_branch()
5828 %{
5829   single_instruction;
5830   INS01  : ISS;
5831   BRANCH : EX1;
5832 %}
5833 
5834 // Conditional branch
5835 pipe_class pipe_branch_cond(rFlagsReg cr)
5836 %{
5837   single_instruction;
5838   cr     : EX1(read);
5839   INS01  : ISS;
5840   BRANCH : EX1;
5841 %}
5842 
5843 // Compare & Branch
5844 // EG.  CBZ/CBNZ
5845 pipe_class pipe_cmp_branch(iRegI op1)
5846 %{
5847   single_instruction;
5848   op1    : EX1(read);
5849   INS01  : ISS;
5850   BRANCH : EX1;
5851 %}
5852 
5853 //------- Synchronisation operations ----------------------
5854 
5855 // Any operation requiring serialization.
5856 // EG.  DMB/Atomic Ops/Load Acquire/Str Release
5857 pipe_class pipe_serial()
5858 %{
5859   single_instruction;
5860   force_serialization;
5861   fixed_latency(16);
5862   INS01  : ISS(2); // Cannot dual issue with any other instruction
5863   LDST   : WR;
5864 %}
5865 
5866 // Generic big/slow expanded idiom - also serialized
5867 pipe_class pipe_slow()
5868 %{
5869   instruction_count(10);
5870   multiple_bundles;
5871   force_serialization;
5872   fixed_latency(16);
5873   INS01  : ISS(2); // Cannot dual issue with any other instruction
5874   LDST   : WR;
5875 %}
5876 
5877 // Empty pipeline class
5878 pipe_class pipe_class_empty()
5879 %{
5880   single_instruction;
5881   fixed_latency(0);
5882 %}
5883 
5884 // Default pipeline class.
5885 pipe_class pipe_class_default()
5886 %{
5887   single_instruction;
5888   fixed_latency(2);
5889 %}
5890 
5891 // Pipeline class for compares.
5892 pipe_class pipe_class_compare()
5893 %{
5894   single_instruction;
5895   fixed_latency(16);
5896 %}
5897 
5898 // Pipeline class for memory operations.
5899 pipe_class pipe_class_memory()
5900 %{
5901   single_instruction;
5902   fixed_latency(16);
5903 %}
5904 
5905 // Pipeline class for call.
5906 pipe_class pipe_class_call()
5907 %{
5908   single_instruction;
5909   fixed_latency(100);
5910 %}
5911 
5912 // Define the class for the Nop node.
5913 define %{
5914    MachNop = pipe_class_empty;
5915 %}
5916 
5917 %}
5918 //----------INSTRUCTIONS-------------------------------------------------------
5919 //
5920 // match      -- States which machine-independent subtree may be replaced
5921 //               by this instruction.
5922 // ins_cost   -- The estimated cost of this instruction is used by instruction
5923 //               selection to identify a minimum cost tree of machine
5924 //               instructions that matches a tree of machine-independent
5925 //               instructions.
5926 // format     -- A string providing the disassembly for this instruction.
5927 //               The value of an instruction's operand may be inserted
5928 //               by referring to it with a '$' prefix.
5929 // opcode     -- Three instruction opcodes may be provided.  These are referred
5930 //               to within an encode class as $primary, $secondary, and $tertiary
5931 //               rrspectively.  The primary opcode is commonly used to
5932 //               indicate the type of machine instruction, while secondary
5933 //               and tertiary are often used for prefix options or addressing
5934 //               modes.
5935 // ins_encode -- A list of encode classes with parameters. The encode class
5936 //               name must have been defined in an 'enc_class' specification
5937 //               in the encode section of the architecture description.
5938 
5939 // ============================================================================
5940 // Memory (Load/Store) Instructions
5941 
5942 // Load Instructions
5943 
5944 // Load Byte (8 bit signed)
5945 instruct loadB(iRegINoSp dst, memory mem)
5946 %{
5947   match(Set dst (LoadB mem));
5948   predicate(!needs_acquiring_load(n));
5949 
5950   ins_cost(4 * INSN_COST);
5951   format %{ "ldrsbw  $dst, $mem\t# byte" %}
5952 
5953   ins_encode(aarch64_enc_ldrsbw(dst, mem));
5954 
5955   ins_pipe(iload_reg_mem);
5956 %}
5957 
5958 // Load Byte (8 bit signed) into long
5959 instruct loadB2L(iRegLNoSp dst, memory mem)
5960 %{
5961   match(Set dst (ConvI2L (LoadB mem)));
5962   predicate(!needs_acquiring_load(n->in(1)));
5963 
5964   ins_cost(4 * INSN_COST);
5965   format %{ "ldrsb  $dst, $mem\t# byte" %}
5966 
5967   ins_encode(aarch64_enc_ldrsb(dst, mem));
5968 
5969   ins_pipe(iload_reg_mem);
5970 %}
5971 
5972 // Load Byte (8 bit unsigned)
5973 instruct loadUB(iRegINoSp dst, memory mem)
5974 %{
5975   match(Set dst (LoadUB mem));
5976   predicate(!needs_acquiring_load(n));
5977 
5978   ins_cost(4 * INSN_COST);
5979   format %{ "ldrbw  $dst, $mem\t# byte" %}
5980 
5981   ins_encode(aarch64_enc_ldrb(dst, mem));
5982 
5983   ins_pipe(iload_reg_mem);
5984 %}
5985 
5986 // Load Byte (8 bit unsigned) into long
5987 instruct loadUB2L(iRegLNoSp dst, memory mem)
5988 %{
5989   match(Set dst (ConvI2L (LoadUB mem)));
5990   predicate(!needs_acquiring_load(n->in(1)));
5991 
5992   ins_cost(4 * INSN_COST);
5993   format %{ "ldrb  $dst, $mem\t# byte" %}
5994 
5995   ins_encode(aarch64_enc_ldrb(dst, mem));
5996 
5997   ins_pipe(iload_reg_mem);
5998 %}
5999 
6000 // Load Short (16 bit signed)
6001 instruct loadS(iRegINoSp dst, memory mem)
6002 %{
6003   match(Set dst (LoadS mem));
6004   predicate(!needs_acquiring_load(n));
6005 
6006   ins_cost(4 * INSN_COST);
6007   format %{ "ldrshw  $dst, $mem\t# short" %}
6008 
6009   ins_encode(aarch64_enc_ldrshw(dst, mem));
6010 
6011   ins_pipe(iload_reg_mem);
6012 %}
6013 
6014 // Load Short (16 bit signed) into long
6015 instruct loadS2L(iRegLNoSp dst, memory mem)
6016 %{
6017   match(Set dst (ConvI2L (LoadS mem)));
6018   predicate(!needs_acquiring_load(n->in(1)));
6019 
6020   ins_cost(4 * INSN_COST);
6021   format %{ "ldrsh  $dst, $mem\t# short" %}
6022 
6023   ins_encode(aarch64_enc_ldrsh(dst, mem));
6024 
6025   ins_pipe(iload_reg_mem);
6026 %}
6027 
6028 // Load Char (16 bit unsigned)
6029 instruct loadUS(iRegINoSp dst, memory mem)
6030 %{
6031   match(Set dst (LoadUS mem));
6032   predicate(!needs_acquiring_load(n));
6033 
6034   ins_cost(4 * INSN_COST);
6035   format %{ "ldrh  $dst, $mem\t# short" %}
6036 
6037   ins_encode(aarch64_enc_ldrh(dst, mem));
6038 
6039   ins_pipe(iload_reg_mem);
6040 %}
6041 
6042 // Load Short/Char (16 bit unsigned) into long
6043 instruct loadUS2L(iRegLNoSp dst, memory mem)
6044 %{
6045   match(Set dst (ConvI2L (LoadUS mem)));
6046   predicate(!needs_acquiring_load(n->in(1)));
6047 
6048   ins_cost(4 * INSN_COST);
6049   format %{ "ldrh  $dst, $mem\t# short" %}
6050 
6051   ins_encode(aarch64_enc_ldrh(dst, mem));
6052 
6053   ins_pipe(iload_reg_mem);
6054 %}
6055 
6056 // Load Integer (32 bit signed)
6057 instruct loadI(iRegINoSp dst, memory mem)
6058 %{
6059   match(Set dst (LoadI mem));
6060   predicate(!needs_acquiring_load(n));
6061 
6062   ins_cost(4 * INSN_COST);
6063   format %{ "ldrw  $dst, $mem\t# int" %}
6064 
6065   ins_encode(aarch64_enc_ldrw(dst, mem));
6066 
6067   ins_pipe(iload_reg_mem);
6068 %}
6069 
6070 // Load Integer (32 bit signed) into long
6071 instruct loadI2L(iRegLNoSp dst, memory mem)
6072 %{
6073   match(Set dst (ConvI2L (LoadI mem)));
6074   predicate(!needs_acquiring_load(n->in(1)));
6075 
6076   ins_cost(4 * INSN_COST);
6077   format %{ "ldrsw  $dst, $mem\t# int" %}
6078 
6079   ins_encode(aarch64_enc_ldrsw(dst, mem));
6080 
6081   ins_pipe(iload_reg_mem);
6082 %}
6083 
6084 // Load Integer (32 bit unsigned) into long
6085 instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
6086 %{
6087   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
6088   predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));
6089 
6090   ins_cost(4 * INSN_COST);
6091   format %{ "ldrw  $dst, $mem\t# int" %}
6092 
6093   ins_encode(aarch64_enc_ldrw(dst, mem));
6094 
6095   ins_pipe(iload_reg_mem);
6096 %}
6097 
6098 // Load Long (64 bit signed)
6099 instruct loadL(iRegLNoSp dst, memory mem)
6100 %{
6101   match(Set dst (LoadL mem));
6102   predicate(!needs_acquiring_load(n));
6103 
6104   ins_cost(4 * INSN_COST);
6105   format %{ "ldr  $dst, $mem\t# int" %}
6106 
6107   ins_encode(aarch64_enc_ldr(dst, mem));
6108 
6109   ins_pipe(iload_reg_mem);
6110 %}
6111 
6112 // Load Range
6113 instruct loadRange(iRegINoSp dst, memory mem)
6114 %{
6115   match(Set dst (LoadRange mem));
6116 
6117   ins_cost(4 * INSN_COST);
6118   format %{ "ldrw  $dst, $mem\t# range" %}
6119 
6120   ins_encode(aarch64_enc_ldrw(dst, mem));
6121 
6122   ins_pipe(iload_reg_mem);
6123 %}
6124 
6125 // Load Pointer
6126 instruct loadP(iRegPNoSp dst, memory mem)
6127 %{
6128   match(Set dst (LoadP mem));
6129   predicate(!needs_acquiring_load(n));
6130 
6131   ins_cost(4 * INSN_COST);
6132   format %{ "ldr  $dst, $mem\t# ptr" %}
6133 
6134   ins_encode(aarch64_enc_ldr(dst, mem));
6135 
6136   ins_pipe(iload_reg_mem);
6137 %}
6138 
6139 // Load Compressed Pointer
6140 instruct loadN(iRegNNoSp dst, memory mem)
6141 %{
6142   match(Set dst (LoadN mem));
6143   predicate(!needs_acquiring_load(n));
6144 
6145   ins_cost(4 * INSN_COST);
6146   format %{ "ldrw  $dst, $mem\t# compressed ptr" %}
6147 
6148   ins_encode(aarch64_enc_ldrw(dst, mem));
6149 
6150   ins_pipe(iload_reg_mem);
6151 %}
6152 
6153 // Load Klass Pointer
6154 instruct loadKlass(iRegPNoSp dst, memory mem)
6155 %{
6156   match(Set dst (LoadKlass mem));
6157   predicate(!needs_acquiring_load(n));
6158 
6159   ins_cost(4 * INSN_COST);
6160   format %{ "ldr  $dst, $mem\t# class" %}
6161 
6162   ins_encode(aarch64_enc_ldr(dst, mem));
6163 
6164   ins_pipe(iload_reg_mem);
6165 %}
6166 
6167 // Load Narrow Klass Pointer
6168 instruct loadNKlass(iRegNNoSp dst, memory mem)
6169 %{
6170   match(Set dst (LoadNKlass mem));
6171   predicate(!needs_acquiring_load(n));
6172 
6173   ins_cost(4 * INSN_COST);
6174   format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
6175 
6176   ins_encode(aarch64_enc_ldrw(dst, mem));
6177 
6178   ins_pipe(iload_reg_mem);
6179 %}
6180 
6181 // Load Float
6182 instruct loadF(vRegF dst, memory mem)
6183 %{
6184   match(Set dst (LoadF mem));
6185   predicate(!needs_acquiring_load(n));
6186 
6187   ins_cost(4 * INSN_COST);
6188   format %{ "ldrs  $dst, $mem\t# float" %}
6189 
6190   ins_encode( aarch64_enc_ldrs(dst, mem) );
6191 
6192   ins_pipe(pipe_class_memory);
6193 %}
6194 
6195 // Load Double
6196 instruct loadD(vRegD dst, memory mem)
6197 %{
6198   match(Set dst (LoadD mem));
6199   predicate(!needs_acquiring_load(n));
6200 
6201   ins_cost(4 * INSN_COST);
6202   format %{ "ldrd  $dst, $mem\t# double" %}
6203 
6204   ins_encode( aarch64_enc_ldrd(dst, mem) );
6205 
6206   ins_pipe(pipe_class_memory);
6207 %}
6208 
6209 
6210 // Load Int Constant
6211 instruct loadConI(iRegINoSp dst, immI src)
6212 %{
6213   match(Set dst src);
6214 
6215   ins_cost(INSN_COST);
6216   format %{ "mov $dst, $src\t# int" %}
6217 
6218   ins_encode( aarch64_enc_movw_imm(dst, src) );
6219 
6220   ins_pipe(ialu_imm);
6221 %}
6222 
6223 // Load Long Constant
6224 instruct loadConL(iRegLNoSp dst, immL src)
6225 %{
6226   match(Set dst src);
6227 
6228   ins_cost(INSN_COST);
6229   format %{ "mov $dst, $src\t# long" %}
6230 
6231   ins_encode( aarch64_enc_mov_imm(dst, src) );
6232 
6233   ins_pipe(ialu_imm);
6234 %}
6235 
6236 // Load Pointer Constant
6237 
6238 instruct loadConP(iRegPNoSp dst, immP con)
6239 %{
6240   match(Set dst con);
6241 
6242   ins_cost(INSN_COST * 4);
6243   format %{
6244     "mov  $dst, $con\t# ptr\n\t"
6245   %}
6246 
6247   ins_encode(aarch64_enc_mov_p(dst, con));
6248 
6249   ins_pipe(ialu_imm);
6250 %}
6251 
6252 // Load Null Pointer Constant
6253 
6254 instruct loadConP0(iRegPNoSp dst, immP0 con)
6255 %{
6256   match(Set dst con);
6257 
6258   ins_cost(INSN_COST);
6259   format %{ "mov  $dst, $con\t# NULL ptr" %}
6260 
6261   ins_encode(aarch64_enc_mov_p0(dst, con));
6262 
6263   ins_pipe(ialu_imm);
6264 %}
6265 
6266 // Load Pointer Constant One
6267 
6268 instruct loadConP1(iRegPNoSp dst, immP_1 con)
6269 %{
6270   match(Set dst con);
6271 
6272   ins_cost(INSN_COST);
6273   format %{ "mov  $dst, $con\t# NULL ptr" %}
6274 
6275   ins_encode(aarch64_enc_mov_p1(dst, con));
6276 
6277   ins_pipe(ialu_imm);
6278 %}
6279 
6280 // Load Poll Page Constant
6281 
6282 instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
6283 %{
6284   match(Set dst con);
6285 
6286   ins_cost(INSN_COST);
6287   format %{ "adr  $dst, $con\t# Poll Page Ptr" %}
6288 
6289   ins_encode(aarch64_enc_mov_poll_page(dst, con));
6290 
6291   ins_pipe(ialu_imm);
6292 %}
6293 
6294 // Load Byte Map Base Constant
6295 
6296 instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
6297 %{
6298   match(Set dst con);
6299 
6300   ins_cost(INSN_COST);
6301   format %{ "adr  $dst, $con\t# Byte Map Base" %}
6302 
6303   ins_encode(aarch64_enc_mov_byte_map_base(dst, con));
6304 
6305   ins_pipe(ialu_imm);
6306 %}
6307 
6308 // Load Narrow Pointer Constant
6309 
6310 instruct loadConN(iRegNNoSp dst, immN con)
6311 %{
6312   match(Set dst con);
6313 
6314   ins_cost(INSN_COST * 4);
6315   format %{ "mov  $dst, $con\t# compressed ptr" %}
6316 
6317   ins_encode(aarch64_enc_mov_n(dst, con));
6318 
6319   ins_pipe(ialu_imm);
6320 %}
6321 
6322 // Load Narrow Null Pointer Constant
6323 
6324 instruct loadConN0(iRegNNoSp dst, immN0 con)
6325 %{
6326   match(Set dst con);
6327 
6328   ins_cost(INSN_COST);
6329   format %{ "mov  $dst, $con\t# compressed NULL ptr" %}
6330 
6331   ins_encode(aarch64_enc_mov_n0(dst, con));
6332 
6333   ins_pipe(ialu_imm);
6334 %}
6335 
6336 // Load Narrow Klass Constant
6337 
6338 instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
6339 %{
6340   match(Set dst con);
6341 
6342   ins_cost(INSN_COST);
6343   format %{ "mov  $dst, $con\t# compressed klass ptr" %}
6344 
6345   ins_encode(aarch64_enc_mov_nk(dst, con));
6346 
6347   ins_pipe(ialu_imm);
6348 %}
6349 
6350 // Load Packed Float Constant
6351 
6352 instruct loadConF_packed(vRegF dst, immFPacked con) %{
6353   match(Set dst con);
6354   ins_cost(INSN_COST * 4);
6355   format %{ "fmovs  $dst, $con"%}
6356   ins_encode %{
6357     __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
6358   %}
6359 
6360   ins_pipe(pipe_class_default);
6361 %}
6362 
6363 // Load Float Constant
6364 
6365 instruct loadConF(vRegF dst, immF con) %{
6366   match(Set dst con);
6367 
6368   ins_cost(INSN_COST * 4);
6369 
6370   format %{
6371     "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
6372   %}
6373 
6374   ins_encode %{
6375     __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
6376   %}
6377 
6378   ins_pipe(pipe_class_default);
6379 %}
6380 
6381 // Load Packed Double Constant
6382 
6383 instruct loadConD_packed(vRegD dst, immDPacked con) %{
6384   match(Set dst con);
6385   ins_cost(INSN_COST);
6386   format %{ "fmovd  $dst, $con"%}
6387   ins_encode %{
6388     __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
6389   %}
6390 
6391   ins_pipe(pipe_class_default);
6392 %}
6393 
6394 // Load Double Constant
6395 
6396 instruct loadConD(vRegD dst, immD con) %{
6397   match(Set dst con);
6398 
6399   ins_cost(INSN_COST * 5);
6400   format %{
6401     "ldrd $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
6402   %}
6403 
6404   ins_encode %{
6405     __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
6406   %}
6407 
6408   ins_pipe(pipe_class_default);
6409 %}
6410 
6411 // Store Instructions
6412 
6413 // Store CMS card-mark Immediate
6414 instruct storeimmCM0(immI0 zero, memory mem)
6415 %{
6416   match(Set mem (StoreCM mem zero));
6417 
6418   ins_cost(INSN_COST);
6419   format %{ "strb zr, $mem\t# byte" %}
6420 
6421   ins_encode(aarch64_enc_strb0(mem));
6422 
6423   ins_pipe(istore_mem);
6424 %}
6425 
6426 // Store Byte
6427 instruct storeB(iRegIorL2I src, memory mem)
6428 %{
6429   match(Set mem (StoreB mem src));
6430   predicate(!needs_releasing_store(n));
6431 
6432   ins_cost(INSN_COST);
6433   format %{ "strb  $src, $mem\t# byte" %}
6434 
6435   ins_encode(aarch64_enc_strb(src, mem));
6436 
6437   ins_pipe(istore_reg_mem);
6438 %}
6439 
6440 
6441 instruct storeimmB0(immI0 zero, memory mem)
6442 %{
6443   match(Set mem (StoreB mem zero));
6444   predicate(!needs_releasing_store(n));
6445 
6446   ins_cost(INSN_COST);
6447   format %{ "strb zr, $mem\t# byte" %}
6448 
6449   ins_encode(aarch64_enc_strb0(mem));
6450 
6451   ins_pipe(istore_mem);
6452 %}
6453 
6454 // Store Char/Short
6455 instruct storeC(iRegIorL2I src, memory mem)
6456 %{
6457   match(Set mem (StoreC mem src));
6458   predicate(!needs_releasing_store(n));
6459 
6460   ins_cost(INSN_COST);
6461   format %{ "strh  $src, $mem\t# short" %}
6462 
6463   ins_encode(aarch64_enc_strh(src, mem));
6464 
6465   ins_pipe(istore_reg_mem);
6466 %}
6467 
6468 instruct storeimmC0(immI0 zero, memory mem)
6469 %{
6470   match(Set mem (StoreC mem zero));
6471   predicate(!needs_releasing_store(n));
6472 
6473   ins_cost(INSN_COST);
6474   format %{ "strh  zr, $mem\t# short" %}
6475 
6476   ins_encode(aarch64_enc_strh0(mem));
6477 
6478   ins_pipe(istore_mem);
6479 %}
6480 
6481 // Store Integer
6482 
6483 instruct storeI(iRegIorL2I src, memory mem)
6484 %{
6485   match(Set mem(StoreI mem src));
6486   predicate(!needs_releasing_store(n));
6487 
6488   ins_cost(INSN_COST);
6489   format %{ "strw  $src, $mem\t# int" %}
6490 
6491   ins_encode(aarch64_enc_strw(src, mem));
6492 
6493   ins_pipe(istore_reg_mem);
6494 %}
6495 
6496 instruct storeimmI0(immI0 zero, memory mem)
6497 %{
6498   match(Set mem(StoreI mem zero));
6499   predicate(!needs_releasing_store(n));
6500 
6501   ins_cost(INSN_COST);
6502   format %{ "strw  zr, $mem\t# int" %}
6503 
6504   ins_encode(aarch64_enc_strw0(mem));
6505 
6506   ins_pipe(istore_mem);
6507 %}
6508 
6509 // Store Long (64 bit signed)
6510 instruct storeL(iRegL src, memory mem)
6511 %{
6512   match(Set mem (StoreL mem src));
6513   predicate(!needs_releasing_store(n));
6514 
6515   ins_cost(INSN_COST);
6516   format %{ "str  $src, $mem\t# int" %}
6517 
6518   ins_encode(aarch64_enc_str(src, mem));
6519 
6520   ins_pipe(istore_reg_mem);
6521 %}
6522 
6523 // Store Long (64 bit signed)
6524 instruct storeimmL0(immL0 zero, memory mem)
6525 %{
6526   match(Set mem (StoreL mem zero));
6527   predicate(!needs_releasing_store(n));
6528 
6529   ins_cost(INSN_COST);
6530   format %{ "str  zr, $mem\t# int" %}
6531 
6532   ins_encode(aarch64_enc_str0(mem));
6533 
6534   ins_pipe(istore_mem);
6535 %}
6536 
6537 // Store Pointer
6538 instruct storeP(iRegP src, memory mem)
6539 %{
6540   match(Set mem (StoreP mem src));
6541   predicate(!needs_releasing_store(n));
6542 
6543   ins_cost(INSN_COST);
6544   format %{ "str  $src, $mem\t# ptr" %}
6545 
6546   ins_encode(aarch64_enc_str(src, mem));
6547 
6548   ins_pipe(istore_reg_mem);
6549 %}
6550 
6551 // Store Pointer
6552 instruct storeimmP0(immP0 zero, memory mem)
6553 %{
6554   match(Set mem (StoreP mem zero));
6555   predicate(!needs_releasing_store(n));
6556 
6557   ins_cost(INSN_COST);
6558   format %{ "str zr, $mem\t# ptr" %}
6559 
6560   ins_encode(aarch64_enc_str0(mem));
6561 
6562   ins_pipe(istore_mem);
6563 %}
6564 
6565 // Store Compressed Pointer
6566 instruct storeN(iRegN src, memory mem)
6567 %{
6568   match(Set mem (StoreN mem src));
6569   predicate(!needs_releasing_store(n));
6570 
6571   ins_cost(INSN_COST);
6572   format %{ "strw  $src, $mem\t# compressed ptr" %}
6573 
6574   ins_encode(aarch64_enc_strw(src, mem));
6575 
6576   ins_pipe(istore_reg_mem);
6577 %}
6578 
6579 instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
6580 %{
6581   match(Set mem (StoreN mem zero));
6582   predicate(Universe::narrow_oop_base() == NULL &&
6583             Universe::narrow_klass_base() == NULL &&
6584             (!needs_releasing_store(n)));
6585 
6586   ins_cost(INSN_COST);
6587   format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}
6588 
6589   ins_encode(aarch64_enc_strw(heapbase, mem));
6590 
6591   ins_pipe(istore_reg_mem);
6592 %}
6593 
6594 // Store Float
6595 instruct storeF(vRegF src, memory mem)
6596 %{
6597   match(Set mem (StoreF mem src));
6598   predicate(!needs_releasing_store(n));
6599 
6600   ins_cost(INSN_COST);
6601   format %{ "strs  $src, $mem\t# float" %}
6602 
6603   ins_encode( aarch64_enc_strs(src, mem) );
6604 
6605   ins_pipe(pipe_class_memory);
6606 %}
6607 
6608 // TODO
6609 // implement storeImmF0 and storeFImmPacked
6610 
6611 // Store Double
6612 instruct storeD(vRegD src, memory mem)
6613 %{
6614   match(Set mem (StoreD mem src));
6615   predicate(!needs_releasing_store(n));
6616 
6617   ins_cost(INSN_COST);
6618   format %{ "strd  $src, $mem\t# double" %}
6619 
6620   ins_encode( aarch64_enc_strd(src, mem) );
6621 
6622   ins_pipe(pipe_class_memory);
6623 %}
6624 
6625 // Store Compressed Klass Pointer
6626 instruct storeNKlass(iRegN src, memory mem)
6627 %{
6628   predicate(!needs_releasing_store(n));
6629   match(Set mem (StoreNKlass mem src));
6630 
6631   ins_cost(INSN_COST);
6632   format %{ "strw  $src, $mem\t# compressed klass ptr" %}
6633 
6634   ins_encode(aarch64_enc_strw(src, mem));
6635 
6636   ins_pipe(istore_reg_mem);
6637 %}
6638 
6639 // TODO
6640 // implement storeImmD0 and storeDImmPacked
6641 
6642 // prefetch instructions
6643 // Must be safe to execute with invalid address (cannot fault).
6644 
6645 instruct prefetchalloc( memory mem ) %{
6646   match(PrefetchAllocation mem);
6647 
6648   ins_cost(INSN_COST);
6649   format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}
6650 
6651   ins_encode( aarch64_enc_prefetchw(mem) );
6652 
6653   ins_pipe(iload_prefetch);
6654 %}
6655 
6656 //  ---------------- volatile loads and stores ----------------
6657 
6658 // Load Byte (8 bit signed)
6659 instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
6660 %{
6661   match(Set dst (LoadB mem));
6662 
6663   ins_cost(VOLATILE_REF_COST);
6664   format %{ "ldarsb  $dst, $mem\t# byte" %}
6665 
6666   ins_encode(aarch64_enc_ldarsb(dst, mem));
6667 
6668   ins_pipe(pipe_serial);
6669 %}
6670 
6671 // Load Byte (8 bit signed) into long
6672 instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
6673 %{
6674   match(Set dst (ConvI2L (LoadB mem)));
6675 
6676   ins_cost(VOLATILE_REF_COST);
6677   format %{ "ldarsb  $dst, $mem\t# byte" %}
6678 
6679   ins_encode(aarch64_enc_ldarsb(dst, mem));
6680 
6681   ins_pipe(pipe_serial);
6682 %}
6683 
6684 // Load Byte (8 bit unsigned)
6685 instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
6686 %{
6687   match(Set dst (LoadUB mem));
6688 
6689   ins_cost(VOLATILE_REF_COST);
6690   format %{ "ldarb  $dst, $mem\t# byte" %}
6691 
6692   ins_encode(aarch64_enc_ldarb(dst, mem));
6693 
6694   ins_pipe(pipe_serial);
6695 %}
6696 
6697 // Load Byte (8 bit unsigned) into long
6698 instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
6699 %{
6700   match(Set dst (ConvI2L (LoadUB mem)));
6701 
6702   ins_cost(VOLATILE_REF_COST);
6703   format %{ "ldarb  $dst, $mem\t# byte" %}
6704 
6705   ins_encode(aarch64_enc_ldarb(dst, mem));
6706 
6707   ins_pipe(pipe_serial);
6708 %}
6709 
6710 // Load Short (16 bit signed)
6711 instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
6712 %{
6713   match(Set dst (LoadS mem));
6714 
6715   ins_cost(VOLATILE_REF_COST);
6716   format %{ "ldarshw  $dst, $mem\t# short" %}
6717 
6718   ins_encode(aarch64_enc_ldarshw(dst, mem));
6719 
6720   ins_pipe(pipe_serial);
6721 %}
6722 
6723 instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
6724 %{
6725   match(Set dst (LoadUS mem));
6726 
6727   ins_cost(VOLATILE_REF_COST);
6728   format %{ "ldarhw  $dst, $mem\t# short" %}
6729 
6730   ins_encode(aarch64_enc_ldarhw(dst, mem));
6731 
6732   ins_pipe(pipe_serial);
6733 %}
6734 
6735 // Load Short/Char (16 bit unsigned) into long
6736 instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
6737 %{
6738   match(Set dst (ConvI2L (LoadUS mem)));
6739 
6740   ins_cost(VOLATILE_REF_COST);
6741   format %{ "ldarh  $dst, $mem\t# short" %}
6742 
6743   ins_encode(aarch64_enc_ldarh(dst, mem));
6744 
6745   ins_pipe(pipe_serial);
6746 %}
6747 
6748 // Load Short/Char (16 bit signed) into long
6749 instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
6750 %{
6751   match(Set dst (ConvI2L (LoadS mem)));
6752 
6753   ins_cost(VOLATILE_REF_COST);
6754   format %{ "ldarh  $dst, $mem\t# short" %}
6755 
6756   ins_encode(aarch64_enc_ldarsh(dst, mem));
6757 
6758   ins_pipe(pipe_serial);
6759 %}
6760 
6761 // Load Integer (32 bit signed)
6762 instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
6763 %{
6764   match(Set dst (LoadI mem));
6765 
6766   ins_cost(VOLATILE_REF_COST);
6767   format %{ "ldarw  $dst, $mem\t# int" %}
6768 
6769   ins_encode(aarch64_enc_ldarw(dst, mem));
6770 
6771   ins_pipe(pipe_serial);
6772 %}
6773 
6774 // Load Integer (32 bit unsigned) into long
6775 instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
6776 %{
6777   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
6778 
6779   ins_cost(VOLATILE_REF_COST);
6780   format %{ "ldarw  $dst, $mem\t# int" %}
6781 
6782   ins_encode(aarch64_enc_ldarw(dst, mem));
6783 
6784   ins_pipe(pipe_serial);
6785 %}
6786 
6787 // Load Long (64 bit signed)
// Volatile long load via 64-bit load-acquire (ldar).
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // Fixed: the trailing comment in the format string said "# int" for
  // what is a 64-bit (long) load.
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
6799 
6800 // Load Pointer
// Volatile oop/pointer load via 64-bit load-acquire.
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
// Narrow oops are 32 bits, hence the w-form acquire load.
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
// FP registers have no acquire-load form; the encoding presumably loads
// via an integer register and moves to the FP register — see the
// aarch64_enc_fldars definition earlier in this file.
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
6851 
6852 // Store Byte
// Volatile byte store via store-release (stlrb).
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
6891 
6892 // Store Long (64 bit signed)
// Volatile long store via 64-bit store-release (stlr).
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  // Fixed: the trailing comment in the format string said "# int" for
  // what is a 64-bit (long) store.
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
6904 
6905 // Store Pointer
// Volatile oop/pointer store via 64-bit store-release.
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
// Narrow oops are 32 bits, hence the w-form release store.
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float
// No FP-register release-store form; see aarch64_enc_fstlrs elsewhere
// in this file for how the value is routed to memory.
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
6959 
6960 //  ---------------- end of volatile loads and stores ----------------
6961 
6962 // ============================================================================
6963 // BSWAP Instructions
6964 
// Byte-reverse a 32-bit value (revw).
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Byte-reverse a 64-bit value (rev).
instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Byte-reverse an unsigned 16-bit value: rev16w swaps the bytes within
// each halfword; no extension needed for the unsigned case.
instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Byte-reverse a signed 16-bit value: after the byte swap, sbfmw with
// bit range 0..15 sign-extends the low halfword into the full register.
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
7018 
7019 // ============================================================================
7020 // Zero Count Instructions
7021 
// Count leading zeros of a 32-bit value (single clzw).
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count leading zeros of a 64-bit value; result is an int register.
instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count trailing zeros (32-bit): AArch64 has no ctz instruction, so
// bit-reverse (rbitw) then count leading zeros.
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count trailing zeros (64-bit): same rbit + clz idiom.
instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
7073 
7074 // ============================================================================
7075 // MemBar Instruction
7076 
// LoadFence: load-acquire style barrier (orders prior loads against
// subsequent loads and stores).
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}

// Elided acquire barrier: the unnecessary_acquire(n) predicate (defined
// elsewhere in this file) determines the barrier is redundant — e.g.
// already covered by an acquiring load — so only a block comment is
// emitted and the rule costs 0 to be preferred by the matcher.
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}

// Full acquire barrier used when the elision predicate does not apply.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}


instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}

// StoreFence: release style barrier (orders prior loads and stores
// against subsequent stores).
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Elided release barrier — see unnecessary_membar_acquire above for the
// elision pattern; unnecessary_release(n) is defined elsewhere in this file.
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}

instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}

  ins_pipe(pipe_serial);
%}

// Elided volatile (full) barrier; unnecessary_volatile(n) is defined
// elsewhere in this file.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Full StoreLoad barrier — the expensive one; the inflated cost
// (VOLATILE_REF_COST*100) presumably biases selection toward cheaper
// elided/acquire-release forms whenever they are legal.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile" %}

  ins_encode %{
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
7218 
7219 // ============================================================================
7220 // Cast/Convert Instructions
7221 
// Reinterpret a long as a pointer; a register move only when the
// allocator placed source and destination in different registers.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Reinterpret a pointer as a long; mirror of castX2P.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Convert oop into int for vectors alignment masking
// (movw keeps only the low 32 bits of the pointer value).
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
7264 
7265 // Convert compressed oop into int for vectors alignment masking
7266 // in case of 32bit oops (heap < 4Gb).
// Convert a compressed oop into an int for vector alignment masking.
// Only valid when narrow oops are unshifted (heap < 4Gb), so the raw
// 32-bit narrow value equals the low 32 bits of the decoded pointer.
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Fixed: the format string printed "mov dst, ..." — the '$' operand
  // substitution marker was missing before dst.
  format %{ "mov $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
7280 
7281 
7282 // Convert oop pointer into compressed form
// Compress an oop that may be null; the general encode path may set
// flags, hence KILL cr (contrast encodeHeapOop_not_null below).
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Compress an oop statically known to be non-null: no null check, no
// flags clobbered.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop that may be null (and is not a constant).
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop known non-null or constant — faster path.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
7335 
7336 // n.b. AArch64 implementations of encode_klass_not_null and
7337 // decode_klass_not_null do not modify the flags register so, unlike
7338 // Intel, we don't kill CR as a side effect here
7339 
// Compress a klass pointer (never null by construction); per the note
// above, the AArch64 implementation does not clobber flags.
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Decompress a narrow klass pointer; the in-place (single-register)
// macro variant is used when src and dst were allocated to the same
// register.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
7373 
// Type-system-only cast: matches dst in place and emits no code
// (size(0), empty encoding).
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// Pointer cast marker — also codeless.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// Int cast marker — codeless and explicitly zero cost.
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
7404 
7405 // ============================================================================
7406 // Atomic operation instructions
7407 //
7408 // Intel and SPARC both implement Ideal Node LoadPLocked and
7409 // Store{PIL}Conditional instructions using a normal load for the
7410 // LoadPLocked and a CAS for the Store{PIL}Conditional.
7411 //
7412 // The ideal code appears only to use LoadPLocked/StorePLocked as a
7413 // pair to lock object allocations from Eden space when not using
7414 // TLABs.
7415 //
7416 // There does not appear to be a Load{IL}Locked Ideal Node and the
7417 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
7418 // and to use StoreIConditional only for 32-bit and StoreLConditional
7419 // only for 64-bit.
7420 //
7421 // We implement LoadPLocked and StorePLocked instructions using,
7422 // respectively the AArch64 hw load-exclusive and store-conditional
7423 // instructions. Whereas we must implement each of
7424 // Store{IL}Conditional using a CAS which employs a pair of
7425 // instructions comprising a load-exclusive followed by a
7426 // store-conditional.
7427 
7428 
7429 // Locked-load (linked load) of the current heap-top
7430 // used when updating the eden heap top
7431 // implemented using ldaxr on AArch64
7432 
// Linked (exclusive) load of the heap top, paired with
// storePConditional below; implemented with ldaxr (load-acquire
// exclusive) as described in the comment block above.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
7445 
7446 // Conditional-store of the updated heap-top.
7447 // Used during allocation of the shared heap.
7448 // Sets flag (EQ) on success.
7449 // implemented using stlxr on AArch64.
7450 
// Conditional store of the updated heap top (pairs with loadPLocked):
// stlxr writes only if the exclusive monitor from the prior ldaxr is
// still held; the encoding leaves EQ set on success via the status
// register compare. Note: oldval is carried by the ideal node but the
// stlxr-based encoding does not compare against it — correctness relies
// on the ldaxr/stlxr exclusive pair.
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}

// StoreLConditional has no linked-load partner (see comment block
// above), so it must be implemented as a full CAS; sets EQ on success.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}

// 32-bit variant of the CAS-based conditional store.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
7504 
7505 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
7506 // can't match them
7507 
// CompareAndSwapI: CAS on a 32-bit value; the boolean result is
// materialized into res by aarch64_enc_cset_eq (1 on success, 0 on
// failure). Flags are clobbered by the compare, hence KILL cr.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// 64-bit CAS.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Pointer CAS (64-bit).
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Narrow-oop CAS (32-bit form).
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
7575 
7576 
// Atomic exchange, 32-bit: prev receives the old memory value. The
// atomic_xchg* macro-assembler helpers take only the base register of
// the indirect address.
instruct get_and_setI(indirect mem, iRegINoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetI mem newv));
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange, 64-bit.
instruct get_and_setL(indirect mem, iRegLNoSp newv, iRegL prev) %{
  match(Set prev (GetAndSetL mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange of a narrow oop (32-bit form).
instruct get_and_setN(indirect mem, iRegNNoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetN mem newv));
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange of a pointer (64-bit form).
instruct get_and_setP(indirect mem, iRegPNoSp newv, iRegP prev) %{
  match(Set prev (GetAndSetP mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
7612 
7613 
// Atomic fetch-and-add, 64-bit, register increment; newval receives the
// previous memory value.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Variant selected when the fetched value is unused (result_not_used);
// passes noreg and is costed one unit cheaper so it wins the match.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Immediate-increment variant (add/sub-encodable long immediate).
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit fetch-and-add family, mirroring the long variants above.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
7697 
7698 // Manifest a CmpL result in an integer register.
7699 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Manifest a CmpL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// cmp sets flags; csetw makes dst 1 when NE (i.e. operands differ);
// cnegw then negates dst when LT, yielding -1/0/1.
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  // format %{ "CmpL3 $dst, $src1, $src2" %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}

// Immediate variant: a negative constant is folded by adding its
// negation (adds) instead of subtracting, then the same cset/cneg
// sequence materializes -1/0/1.
// NOTE(review): the constant is narrowed via (int32_t) — this assumes
// immLAddSub values always fit in 32 bits; confirm against the operand
// definition earlier in this file.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    int32_t con = (int32_t)$src2$$constant;
     if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
7745 
7746 // ============================================================================
7747 // Conditional Move Instructions
7748 
7749 // n.b. we have identical rules for both a signed compare op (cmpOp)
7750 // and an unsigned compare op (cmpOpU). it would be nice if we could
7751 // define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
7757 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
7758 
// Conditional move, signed compare: csel selects its first source when
// the condition holds, so src2 is chosen on $cmp and src1 otherwise.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare flavour — identical encoding, different operand
// classes (see the comment block above for why both rules exist).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
7790 
7791 // special cases where one arg is zero
7792 
7793 // n.b. this is selected in preference to the rule above because it
7794 // avoids loading constant 0 into a source register
7795 
7796 // TODO
7797 // we ought only to be able to cull one of these variants as the ideal
7798 // transforms ought always to order the zero consistently (to left/right?)
7799 
// Conditional move where the first value is the constant zero: use the
// hardwired zero register instead of materializing 0 (see note above).
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Mirror case: second value is zero, so zr becomes the taken operand.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
7863 
7864 // special case for creating a boolean 0 or 1
7865 
7866 // n.b. this is selected in preference to the rule above because it
7867 // avoids loading constants 0 and 1 into a source register
7868 
7869 instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
7870   match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));
7871 
7872   ins_cost(INSN_COST * 2);
7873   format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}
7874 
7875   ins_encode %{
7876     // equivalently
7877     // cset(as_Register($dst$$reg),
7878     //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
7879     __ csincw(as_Register($dst$$reg),
7880              zr,
7881              zr,
7882              (Assembler::Condition)$cmp$$cmpcode);
7883   %}
7884 
7885   ins_pipe(icond_none);
7886 %}
7887 
7888 instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
7889   match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));
7890 
7891   ins_cost(INSN_COST * 2);
7892   format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}
7893 
7894   ins_encode %{
7895     // equivalently
7896     // cset(as_Register($dst$$reg),
7897     //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
7898     __ csincw(as_Register($dst$$reg),
7899              zr,
7900              zr,
7901              (Assembler::Condition)$cmp$$cmpcode);
7902   %}
7903 
7904   ins_pipe(icond_none);
7905 %}
7906 
// Conditional move, long: csel selects $src2 when $cmp holds, else $src1.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare variant of the rule above.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// dst = $cmp ? 0 : $src — zr supplies the zero arm.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare variant of the rule above.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = $cmp ? $src : 0 — zr supplies the zero arm.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare variant of the rule above.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8004 
// Conditional move, pointer: csel selects $src2 when $cmp holds, else $src1.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare variant of the rule above.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// dst = $cmp ? null : $src — zr supplies the null arm.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare variant of the rule above.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = $cmp ? $src : null — zr supplies the null arm.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare variant of the rule above.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Conditional move, compressed pointer: 32-bit cselw since narrow oops
// live in the low word.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8118 
// Conditional move, compressed pointer, unsigned compare: cselw selects
// $src2 when $cmp holds, otherwise $src1.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  // fixed: this is the unsigned (cmpOpU) rule, but the format said "signed"
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8134 
8135 // special cases where one arg is zero
8136 
// dst = $cmp ? 0 : $src — zr supplies the null narrow-oop arm.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare variant of the rule above.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = $cmp ? $src : 0 — zr supplies the null narrow-oop arm.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare variant of the rule above.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8200 
// Conditional move, float: fcsels selects $src2 when the condition holds,
// otherwise $src1.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}

// Unsigned-compare variant of the rule above.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
8236 
// Conditional move, double: fcseld selects $src2 when the condition holds,
// otherwise $src1.
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // fixed: this is the double (CMoveD/fcseld) rule, but the format said "float"
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
8254 
// Conditional move, double, unsigned compare: fcseld selects $src2 when the
// condition holds, otherwise $src1.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // fixed: this is the double (CMoveD/fcseld) rule, but the format said "float"
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
8272 
8273 // ============================================================================
8274 // Arithmetic Instructions
8275 //
8276 
8277 // Integer Addition
8278 
8279 // TODO
8280 // these currently employ operations which do not set CR and hence are
8281 // not flagged as killing CR but we would like to isolate the cases
8282 // where we want to set flags from those where we don't. need to work
8283 // out how to do that.
8284 
// Int add, register-register.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Int add, register-immediate (immIAddSub: immediates encodable by add/sub).
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// Int add of a long truncated to int (ConvL2I folded into the addw).
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// Pointer Addition
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer add with the int offset sign-extended in the add itself (sxtw).
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer add with a shifted index, folded into one lea (base + index << scale).
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// As above but the index is an int: sign-extend and scale in one lea (sxtw).
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// (long)(int << scale): sign-extend-and-shift in one sbfiz.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}

// Pointer Immediate Addition
// n.b. this needs to be more expensive than using an indirect memory
// operand
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}

// Long Addition
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
8437 
// Long Immediate Addition. No constant pool entries required.
// Long add, register-immediate (immLAddSub: immediates encodable by add/sub).
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
8452 
8453 // Integer Subtraction
// Int subtract, register-register.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// Long Subtraction
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
8500 
// Long Immediate Subtraction. No constant pool entries required.
// Long subtract, register-immediate (immLAddSub: immediates encodable by add/sub).
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // fixed: format was missing the space between the mnemonic and $dst ("sub$dst")
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
8515 
8516 // Integer Negation (special case for sub)
8517 
// Int negate: matched as 0 - src.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long Negation

// Long negate: matched as 0L - src.
// NOTE(review): src is declared iRegIorL2I for a 64-bit neg — confirm
// iRegL was not intended here.
instruct negL_reg(iRegLNoSp dst, iRegIorL2I src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8547 
8548 // Integer Multiply
8549 
// Int multiply, register-register.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// Widening int*int -> long multiply: both ConvI2L nodes are folded into a
// single smull.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// Long Multiply

instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}

// High 64 bits of a signed 128-bit product (smulh).
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2, \t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
8612 
8613 // Combined Integer Multiply & Add/Sub
8614 
// Fused int multiply-add: dst = src3 + src1 * src2, one maddw.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // fixed: format said "madd" but the encoding emits the 32-bit maddw
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
8630 
// Fused int multiply-subtract: dst = src3 - src1 * src2, one msubw.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // fixed: format said "msub" but the encoding emits the 32-bit msubw
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
8646 
8647 // Combined Long Multiply & Add/Sub
8648 
// Fused long multiply-add: dst = src3 + src1 * src2, one madd.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// Fused long multiply-subtract: dst = src3 - src1 * src2, one msub.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
8680 
8681 // Integer Divide
8682 
// Int divide, via encoding helper (handles the Java div semantics).
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}

// Sign-bit extraction: (src1 >> 31) >>> 31 == src1 >>> 31, so a single lsrw.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// Round-toward-zero adjust for /2: src + sign-bit, folded into one
// shifted addw.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}

// Long Divide

instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}

// Sign-bit extraction, long: (src1 >> 63) >>> 63 == src1 >>> 63, one lsr.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}

// Round-toward-zero adjust for /2, long: src + sign-bit via one shifted add.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immL_63 div1, immL_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "add $dst, $src, $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
8752 
8753 // Integer Remainder
8754 
// Int remainder: sdivw then msubw to reconstruct src1 - (src1/src2)*src2.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  // fixed: format had a stray "(" after msubw
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
8765 
8766 // Long Remainder
8767 
// Long remainder: sdiv then msub to reconstruct src1 - (src1/src2)*src2.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // fixed: stray "(" after msub, and "\n" -> "\n\t" to match the int rule
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
8778 
8779 // Integer Shifts
8780 
8781 // Shift Left Register
// Int shift left, variable amount.
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
// Shift count masked to 0x1f, matching Java 32-bit shift semantics.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
// Shift count masked to 0x1f, matching Java 32-bit shift semantics.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
// Shift count masked to 0x1f, matching Java 32-bit shift semantics.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
8876 
8877 // Combined Int Mask and Right Shift (using UBFM)
8878 // TODO
8879 
// Long Shifts

// Shift Left Register
// 64-bit left shift by a register amount; LSLV takes the count modulo 64
// in hardware, matching Java long shift semantics.
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
// 64-bit left shift by constant; the amount is masked to 6 bits (0x3f),
// matching Java long shift semantics (JLS 15.19).
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
// 64-bit unsigned right shift by a register amount (LSRV).
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
// 64-bit unsigned right shift by constant, masked to 6 bits.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
8945 
// A special-case pattern for card table stores.
// Matches the pointer-to-integer cast plus unsigned right shift used to
// turn an address into a card-table index, so no separate CastP2X move
// is emitted.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
8961 
// Shift Right Arithmetic Register
// 64-bit signed right shift by a register amount (ASRV).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
// 64-bit signed right shift by constant, masked to 6 bits (0x3f) per
// Java long shift semantics.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
8993 
8994 // BEGIN This section of the file is automatically generated. Do not edit --------------
8995 
// Bitwise NOT (64-bit): dst = src1 ^ -1, encoded as eon dst, src1, zr
// (src1 EOR NOT(0) == ~src1).
// NOTE(review): the rFlagsReg cr operand appears unused -- there is no
// effect(KILL cr) and EON does not set flags; confirm against the generator.
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
// Bitwise NOT (32-bit): dst = src1 ^ -1, encoded as eonw dst, src1, zr.
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
9028 
// dst = src1 & ~src2 (32-bit), using BIC (and with complement).
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 & ~src2 (64-bit).
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | ~src2 (32-bit), using ORN (or with complement).
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | ~src2 (64-bit).
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = ~(src1 ^ src2) (32-bit), using EON (xor with complement).
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = ~(src1 ^ src2) (64-bit).
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
9130 
// And with an inverted, shifted second operand:
//   dst = src1 & ~(src2 <shift> src3)
// FIX(review): the 32-bit (I) forms previously masked the shift constant
// with 0x3f.  AArch64 shifted-register operands on W registers only allow
// shift amounts 0-31, and Java masks int shift distances to 5 bits
// (JLS 15.19), so the mask is corrected to 0x1f.  The 64-bit (L) forms
// keep 0x3f.  This section is generated: apply the same fix to the
// generator source as well.

// dst = src1 & ~(src2 >>> src3), 32-bit.
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 >>> src3), 64-bit.
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 >> src3), 32-bit.
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 >> src3), 64-bit.
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 << src3), 32-bit.
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 << src3), 64-bit.
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9238 
// Xor with an inverted, shifted second operand:
//   dst = ~(src1 ^ (src2 <shift> src3)), encoded with EON.
// FIX(review): the 32-bit (I) forms previously masked the shift constant
// with 0x3f; W-register shifted operands only allow amounts 0-31 and Java
// masks int shift distances to 5 bits (JLS 15.19), so the mask is
// corrected to 0x1f.  The 64-bit (L) forms keep 0x3f.

// dst = ~(src1 ^ (src2 >>> src3)), 32-bit.
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 >>> src3)), 64-bit.
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 >> src3)), 32-bit.
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 >> src3)), 64-bit.
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 << src3)), 32-bit.
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 << src3)), 64-bit.
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9346 
// Or with an inverted, shifted second operand:
//   dst = src1 | ~(src2 <shift> src3), encoded with ORN.
// FIX(review): the 32-bit (I) forms previously masked the shift constant
// with 0x3f; W-register shifted operands only allow amounts 0-31 and Java
// masks int shift distances to 5 bits (JLS 15.19), so the mask is
// corrected to 0x1f.  The 64-bit (L) forms keep 0x3f.

// dst = src1 | ~(src2 >>> src3), 32-bit.
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >>> src3), 64-bit.
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >> src3), 32-bit.
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >> src3), 64-bit.
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 << src3), 32-bit.
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 << src3), 64-bit.
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9454 
// And with a shifted second operand: dst = src1 & (src2 <shift> src3).
// FIX(review): the 32-bit (I) forms previously masked the shift constant
// with 0x3f; W-register shifted operands only allow amounts 0-31 and Java
// masks int shift distances to 5 bits (JLS 15.19), so the mask is
// corrected to 0x1f.  The 64-bit (L) forms keep 0x3f.

// dst = src1 & (src2 >>> src3), 32-bit.
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >>> src3), 64-bit.
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >> src3), 32-bit.
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >> src3), 64-bit.
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 << src3), 32-bit.
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 << src3), 64-bit.
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9568 
// Xor with a shifted second operand: dst = src1 ^ (src2 <shift> src3).
// FIX(review): the 32-bit (I) forms previously masked the shift constant
// with 0x3f; W-register shifted operands only allow amounts 0-31 and Java
// masks int shift distances to 5 bits (JLS 15.19), so the mask is
// corrected to 0x1f.  The 64-bit (L) forms keep 0x3f.

// dst = src1 ^ (src2 >>> src3), 32-bit.
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >>> src3), 64-bit.
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >> src3), 32-bit.
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >> src3), 64-bit.
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 << src3), 32-bit.
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 << src3), 64-bit.
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9682 
// Or with a shifted second operand: dst = src1 | (src2 <shift> src3).
// FIX(review): the 32-bit (I) forms previously masked the shift constant
// with 0x3f; W-register shifted operands only allow amounts 0-31 and Java
// masks int shift distances to 5 bits (JLS 15.19), so the mask is
// corrected to 0x1f.  The 64-bit (L) forms keep 0x3f.

// dst = src1 | (src2 >>> src3), 32-bit.
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >>> src3), 64-bit.
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >> src3), 32-bit.
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >> src3), 64-bit.
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 << src3), 32-bit.
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 << src3), 64-bit.
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9796 
// Add with a right-shifted second operand: dst = src1 + (src2 <shift> src3).
// FIX(review): the 32-bit (I) forms previously masked the shift constant
// with 0x3f; W-register shifted operands only allow amounts 0-31 and Java
// masks int shift distances to 5 bits (JLS 15.19), so the mask is
// corrected to 0x1f.  The 64-bit (L) forms keep 0x3f.

// dst = src1 + (src2 >>> src3), 32-bit.
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >>> src3), 64-bit.
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >> src3), 32-bit.
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >> src3), 64-bit.
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9872 
9873 instruct AddI_reg_LShift_reg(iRegINoSp dst,
9874                          iRegIorL2I src1, iRegIorL2I src2,
9875                          immI src3, rFlagsReg cr) %{
9876   match(Set dst (AddI src1 (LShiftI src2 src3)));
9877 
9878   ins_cost(1.9 * INSN_COST);
9879   format %{ "addw  $dst, $src1, $src2, LSL $src3" %}
9880 
9881   ins_encode %{
9882     __ addw(as_Register($dst$$reg),
9883               as_Register($src1$$reg),
9884               as_Register($src2$$reg),
9885               Assembler::LSL,
9886               $src3$$constant & 0x3f);
9887   %}
9888 
9889   ins_pipe(ialu_reg_reg_shift);
9890 %}
9891 
9892 instruct AddL_reg_LShift_reg(iRegLNoSp dst,
9893                          iRegL src1, iRegL src2,
9894                          immI src3, rFlagsReg cr) %{
9895   match(Set dst (AddL src1 (LShiftL src2 src3)));
9896 
9897   ins_cost(1.9 * INSN_COST);
9898   format %{ "add  $dst, $src1, $src2, LSL $src3" %}
9899 
9900   ins_encode %{
9901     __ add(as_Register($dst$$reg),
9902               as_Register($src1$$reg),
9903               as_Register($src2$$reg),
9904               Assembler::LSL,
9905               $src3$$constant & 0x3f);
9906   %}
9907 
9908   ins_pipe(ialu_reg_reg_shift);
9909 %}
9910 
9911 instruct SubI_reg_URShift_reg(iRegINoSp dst,
9912                          iRegIorL2I src1, iRegIorL2I src2,
9913                          immI src3, rFlagsReg cr) %{
9914   match(Set dst (SubI src1 (URShiftI src2 src3)));
9915 
9916   ins_cost(1.9 * INSN_COST);
9917   format %{ "subw  $dst, $src1, $src2, LSR $src3" %}
9918 
9919   ins_encode %{
9920     __ subw(as_Register($dst$$reg),
9921               as_Register($src1$$reg),
9922               as_Register($src2$$reg),
9923               Assembler::LSR,
9924               $src3$$constant & 0x3f);
9925   %}
9926 
9927   ins_pipe(ialu_reg_reg_shift);
9928 %}
9929 
9930 instruct SubL_reg_URShift_reg(iRegLNoSp dst,
9931                          iRegL src1, iRegL src2,
9932                          immI src3, rFlagsReg cr) %{
9933   match(Set dst (SubL src1 (URShiftL src2 src3)));
9934 
9935   ins_cost(1.9 * INSN_COST);
9936   format %{ "sub  $dst, $src1, $src2, LSR $src3" %}
9937 
9938   ins_encode %{
9939     __ sub(as_Register($dst$$reg),
9940               as_Register($src1$$reg),
9941               as_Register($src2$$reg),
9942               Assembler::LSR,
9943               $src3$$constant & 0x3f);
9944   %}
9945 
9946   ins_pipe(ialu_reg_reg_shift);
9947 %}
9948 
9949 instruct SubI_reg_RShift_reg(iRegINoSp dst,
9950                          iRegIorL2I src1, iRegIorL2I src2,
9951                          immI src3, rFlagsReg cr) %{
9952   match(Set dst (SubI src1 (RShiftI src2 src3)));
9953 
9954   ins_cost(1.9 * INSN_COST);
9955   format %{ "subw  $dst, $src1, $src2, ASR $src3" %}
9956 
9957   ins_encode %{
9958     __ subw(as_Register($dst$$reg),
9959               as_Register($src1$$reg),
9960               as_Register($src2$$reg),
9961               Assembler::ASR,
9962               $src3$$constant & 0x3f);
9963   %}
9964 
9965   ins_pipe(ialu_reg_reg_shift);
9966 %}
9967 
9968 instruct SubL_reg_RShift_reg(iRegLNoSp dst,
9969                          iRegL src1, iRegL src2,
9970                          immI src3, rFlagsReg cr) %{
9971   match(Set dst (SubL src1 (RShiftL src2 src3)));
9972 
9973   ins_cost(1.9 * INSN_COST);
9974   format %{ "sub  $dst, $src1, $src2, ASR $src3" %}
9975 
9976   ins_encode %{
9977     __ sub(as_Register($dst$$reg),
9978               as_Register($src1$$reg),
9979               as_Register($src2$$reg),
9980               Assembler::ASR,
9981               $src3$$constant & 0x3f);
9982   %}
9983 
9984   ins_pipe(ialu_reg_reg_shift);
9985 %}
9986 
9987 instruct SubI_reg_LShift_reg(iRegINoSp dst,
9988                          iRegIorL2I src1, iRegIorL2I src2,
9989                          immI src3, rFlagsReg cr) %{
9990   match(Set dst (SubI src1 (LShiftI src2 src3)));
9991 
9992   ins_cost(1.9 * INSN_COST);
9993   format %{ "subw  $dst, $src1, $src2, LSL $src3" %}
9994 
9995   ins_encode %{
9996     __ subw(as_Register($dst$$reg),
9997               as_Register($src1$$reg),
9998               as_Register($src2$$reg),
9999               Assembler::LSL,
10000               $src3$$constant & 0x3f);
10001   %}
10002 
10003   ins_pipe(ialu_reg_reg_shift);
10004 %}
10005 
10006 instruct SubL_reg_LShift_reg(iRegLNoSp dst,
10007                          iRegL src1, iRegL src2,
10008                          immI src3, rFlagsReg cr) %{
10009   match(Set dst (SubL src1 (LShiftL src2 src3)));
10010 
10011   ins_cost(1.9 * INSN_COST);
10012   format %{ "sub  $dst, $src1, $src2, LSL $src3" %}
10013 
10014   ins_encode %{
10015     __ sub(as_Register($dst$$reg),
10016               as_Register($src1$$reg),
10017               as_Register($src2$$reg),
10018               Assembler::LSL,
10019               $src3$$constant & 0x3f);
10020   %}
10021 
10022   ins_pipe(ialu_reg_reg_shift);
10023 %}
10024 
10025 
10026 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Folds (src << lshift) >> rshift (arithmetic) into one SBFM.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  // n->in(2) is rshift_count; n->in(1)->in(2) is lshift_count.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    // SBFM immr/imms computed so the net effect equals the shift pair:
    // r = (rshift - lshift) mod 64, s = top source bit kept (63 - lshift).
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of sbfmL: widths and masks use 31 instead of 63.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Unsigned counterpart of sbfmL: (src << lshift) >>> rshift via UBFM.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of ubfmL.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
// Bitfield extract with shift & mask
// (src >>> rshift) & mask becomes a single UBFX when mask+1 is a power
// of two; immI_bitmask/immL_bitmask guarantee that, so exact_log2 is valid.

instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  // NOTE(review): format omits $rshift; debug listing shows only the mask.
  format %{ "ubfxw $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);   // field width in bits
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit variant of ubfxwI.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The 64-bit ubfx zero-extends, so the ConvI2L comes for free.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
10168 
// Rotations
// (x << lshift) | (x >>> rshift) with lshift + rshift == word size is a
// rotate; the predicates check (lshift + rshift) mod size == 0 and the
// idiom is matched to a single EXTR/EXTRW of the two sources.

instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  // lshift + rshift must sum to 0 mod 64 for EXTR to apply.
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit variant of extrOrL (sum checked mod 32, extrw emitted).
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// Same idiom with AddL in place of OrL: the shifted fields do not
// overlap under the predicate, so add and or produce the same value.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit variant of extrAddL.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
10230 
10231 
// rol expander
// AArch64 has no rotate-left instruction; rol(x, n) is implemented as
// rorv(x, -n): negate the shift into rscratch1, then rotate right.

instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    // rscratch1 = -shift; rorv uses the count mod 64.
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// rol expander
// 32-bit variant of rolL_rReg (rorvw takes the count mod 32).

instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
10263 
10264 instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
10265 %{
10266   match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));
10267 
10268   expand %{
10269     rolL_rReg(dst, src, shift, cr);
10270   %}
10271 %}
10272 
10273 instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
10274 %{
10275   match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));
10276 
10277   expand %{
10278     rolL_rReg(dst, src, shift, cr);
10279   %}
10280 %}
10281 
10282 instruct rolI_rReg_Var_C_32(iRegLNoSp dst, iRegL src, iRegI shift, immI_32 c_32, rFlagsReg cr)
10283 %{
10284   match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));
10285 
10286   expand %{
10287     rolL_rReg(dst, src, shift, cr);
10288   %}
10289 %}
10290 
10291 instruct rolI_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
10292 %{
10293   match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));
10294 
10295   expand %{
10296     rolL_rReg(dst, src, shift, cr);
10297   %}
10298 %}
10299 
// ror expander
// Rotate right maps directly onto the RORV instruction.

instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander
// 32-bit variant of rorL_rReg.

instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
10329 
10330 instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
10331 %{
10332   match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));
10333 
10334   expand %{
10335     rorL_rReg(dst, src, shift, cr);
10336   %}
10337 %}
10338 
10339 instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
10340 %{
10341   match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));
10342 
10343   expand %{
10344     rorL_rReg(dst, src, shift, cr);
10345   %}
10346 %}
10347 
10348 instruct rorI_rReg_Var_C_32(iRegLNoSp dst, iRegL src, iRegI shift, immI_32 c_32, rFlagsReg cr)
10349 %{
10350   match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));
10351 
10352   expand %{
10353     rorL_rReg(dst, src, shift, cr);
10354   %}
10355 %}
10356 
10357 instruct rorI_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
10358 %{
10359   match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));
10360 
10361   expand %{
10362     rorL_rReg(dst, src, shift, cr);
10363   %}
10364 %}
10365 
// Add/subtract (extended)
// int-to-long conversion of the second operand is folded into the
// add/sub via the sxtw (sign-extend word) extended-register form.

instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

// Subtract counterpart of AddExtI.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
10393 
10394 
// Sign/zero extension expressed as (x << k) >> k is folded into the
// add's extended-register form: k = 16 -> sxth/uxth semantics on a
// 16-bit field, k = 24/56 -> sxtb/uxtb, k = 32 -> sxtw.  The immI_*
// operand types pin the shift counts so the extension width is exact.

instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// (x << 24) >> 24 on an int == sign-extend byte.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// (x << 24) >>> 24 on an int == zero-extend byte.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long variants: shift counts are 48/32/56 for 16/32/8-bit fields.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
10485 
10486 
// Zero-extension expressed as a mask (x & 0xff / 0xffff / 0xffffffff)
// folded into the add/sub extended-register form (uxtb/uxth/uxtw).
// The imm*_255/65535/4294967295 operand types pin the mask values.

instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long add variants.
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Subtract counterparts of the above.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
10616 
10617 // END This section of the file is automatically generated. Do not edit --------------
10618 
10619 // ============================================================================
10620 // Floating Point Arithmetic Instructions
10621 
10622 instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
10623   match(Set dst (AddF src1 src2));
10624 
10625   ins_cost(INSN_COST * 5);
10626   format %{ "fadds   $dst, $src1, $src2" %}
10627 
10628   ins_encode %{
10629     __ fadds(as_FloatRegister($dst$$reg),
10630              as_FloatRegister($src1$$reg),
10631              as_FloatRegister($src2$$reg));
10632   %}
10633 
10634   ins_pipe(pipe_class_default);
10635 %}
10636 
10637 instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
10638   match(Set dst (AddD src1 src2));
10639 
10640   ins_cost(INSN_COST * 5);
10641   format %{ "faddd   $dst, $src1, $src2" %}
10642 
10643   ins_encode %{
10644     __ faddd(as_FloatRegister($dst$$reg),
10645              as_FloatRegister($src1$$reg),
10646              as_FloatRegister($src2$$reg));
10647   %}
10648 
10649   ins_pipe(pipe_class_default);
10650 %}
10651 
10652 instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
10653   match(Set dst (SubF src1 src2));
10654 
10655   ins_cost(INSN_COST * 5);
10656   format %{ "fsubs   $dst, $src1, $src2" %}
10657 
10658   ins_encode %{
10659     __ fsubs(as_FloatRegister($dst$$reg),
10660              as_FloatRegister($src1$$reg),
10661              as_FloatRegister($src2$$reg));
10662   %}
10663 
10664   ins_pipe(pipe_class_default);
10665 %}
10666 
10667 instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
10668   match(Set dst (SubD src1 src2));
10669 
10670   ins_cost(INSN_COST * 5);
10671   format %{ "fsubd   $dst, $src1, $src2" %}
10672 
10673   ins_encode %{
10674     __ fsubd(as_FloatRegister($dst$$reg),
10675              as_FloatRegister($src1$$reg),
10676              as_FloatRegister($src2$$reg));
10677   %}
10678 
10679   ins_pipe(pipe_class_default);
10680 %}
10681 
10682 instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
10683   match(Set dst (MulF src1 src2));
10684 
10685   ins_cost(INSN_COST * 6);
10686   format %{ "fmuls   $dst, $src1, $src2" %}
10687 
10688   ins_encode %{
10689     __ fmuls(as_FloatRegister($dst$$reg),
10690              as_FloatRegister($src1$$reg),
10691              as_FloatRegister($src2$$reg));
10692   %}
10693 
10694   ins_pipe(pipe_class_default);
10695 %}
10696 
10697 instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
10698   match(Set dst (MulD src1 src2));
10699 
10700   ins_cost(INSN_COST * 6);
10701   format %{ "fmuld   $dst, $src1, $src2" %}
10702 
10703   ins_encode %{
10704     __ fmuld(as_FloatRegister($dst$$reg),
10705              as_FloatRegister($src1$$reg),
10706              as_FloatRegister($src2$$reg));
10707   %}
10708 
10709   ins_pipe(pipe_class_default);
10710 %}
10711 
10712 // We cannot use these fused mul w add/sub ops because they don't
10713 // produce the same result as the equivalent separated ops
10714 // (essentially they don't round the intermediate result). that's a
// shame. Leaving them here in case we can identify cases where it is
10716 // legitimate to use them
10717 
10718 
10719 // instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
10720 //   match(Set dst (AddF (MulF src1 src2) src3));
10721 
10722 //   format %{ "fmadds   $dst, $src1, $src2, $src3" %}
10723 
10724 //   ins_encode %{
10725 //     __ fmadds(as_FloatRegister($dst$$reg),
10726 //              as_FloatRegister($src1$$reg),
10727 //              as_FloatRegister($src2$$reg),
10728 //              as_FloatRegister($src3$$reg));
10729 //   %}
10730 
10731 //   ins_pipe(pipe_class_default);
10732 // %}
10733 
10734 // instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
10735 //   match(Set dst (AddD (MulD src1 src2) src3));
10736 
10737 //   format %{ "fmaddd   $dst, $src1, $src2, $src3" %}
10738 
10739 //   ins_encode %{
10740 //     __ fmaddd(as_FloatRegister($dst$$reg),
10741 //              as_FloatRegister($src1$$reg),
10742 //              as_FloatRegister($src2$$reg),
10743 //              as_FloatRegister($src3$$reg));
10744 //   %}
10745 
10746 //   ins_pipe(pipe_class_default);
10747 // %}
10748 
10749 // instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
10750 //   match(Set dst (AddF (MulF (NegF src1) src2) src3));
10751 //   match(Set dst (AddF (NegF (MulF src1 src2)) src3));
10752 
10753 //   format %{ "fmsubs   $dst, $src1, $src2, $src3" %}
10754 
10755 //   ins_encode %{
10756 //     __ fmsubs(as_FloatRegister($dst$$reg),
10757 //               as_FloatRegister($src1$$reg),
10758 //               as_FloatRegister($src2$$reg),
10759 //              as_FloatRegister($src3$$reg));
10760 //   %}
10761 
10762 //   ins_pipe(pipe_class_default);
10763 // %}
10764 
10765 // instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
10766 //   match(Set dst (AddD (MulD (NegD src1) src2) src3));
10767 //   match(Set dst (AddD (NegD (MulD src1 src2)) src3));
10768 
10769 //   format %{ "fmsubd   $dst, $src1, $src2, $src3" %}
10770 
10771 //   ins_encode %{
10772 //     __ fmsubd(as_FloatRegister($dst$$reg),
10773 //               as_FloatRegister($src1$$reg),
10774 //               as_FloatRegister($src2$$reg),
10775 //               as_FloatRegister($src3$$reg));
10776 //   %}
10777 
10778 //   ins_pipe(pipe_class_default);
10779 // %}
10780 
10781 // instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
10782 //   match(Set dst (SubF (MulF (NegF src1) src2) src3));
10783 //   match(Set dst (SubF (NegF (MulF src1 src2)) src3));
10784 
10785 //   format %{ "fnmadds  $dst, $src1, $src2, $src3" %}
10786 
10787 //   ins_encode %{
10788 //     __ fnmadds(as_FloatRegister($dst$$reg),
10789 //                as_FloatRegister($src1$$reg),
10790 //                as_FloatRegister($src2$$reg),
10791 //                as_FloatRegister($src3$$reg));
10792 //   %}
10793 
10794 //   ins_pipe(pipe_class_default);
10795 // %}
10796 
10797 // instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
10798 //   match(Set dst (SubD (MulD (NegD src1) src2) src3));
10799 //   match(Set dst (SubD (NegD (MulD src1 src2)) src3));
10800 
10801 //   format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}
10802 
10803 //   ins_encode %{
10804 //     __ fnmaddd(as_FloatRegister($dst$$reg),
10805 //                as_FloatRegister($src1$$reg),
10806 //                as_FloatRegister($src2$$reg),
10807 //                as_FloatRegister($src3$$reg));
10808 //   %}
10809 
10810 //   ins_pipe(pipe_class_default);
10811 // %}
10812 
10813 // instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
10814 //   match(Set dst (SubF (MulF src1 src2) src3));
10815 
10816 //   format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}
10817 
10818 //   ins_encode %{
10819 //     __ fnmsubs(as_FloatRegister($dst$$reg),
10820 //                as_FloatRegister($src1$$reg),
10821 //                as_FloatRegister($src2$$reg),
10822 //                as_FloatRegister($src3$$reg));
10823 //   %}
10824 
10825 //   ins_pipe(pipe_class_default);
10826 // %}
10827 
10828 // instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
10829 //   match(Set dst (SubD (MulD src1 src2) src3));
10830 
10831 //   format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}
10832 
10833 //   ins_encode %{
10834 //   // n.b. insn name should be fnmsubd
10835 //     __ fnmsub(as_FloatRegister($dst$$reg),
10836 //                as_FloatRegister($src1$$reg),
10837 //                as_FloatRegister($src2$$reg),
10838 //                as_FloatRegister($src3$$reg));
10839 //   %}
10840 
10841 //   ins_pipe(pipe_class_default);
10842 // %}
10843 
10844 
// Single-precision divide: dst = src1 / src2 via FDIVS.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
10859 
// Double-precision divide: dst = src1 / src2 via FDIVD.
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
10874 
// Single-precision negate: dst = -src via FNEGS.
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Fixed: format previously printed "fneg", but the encoding emits the
  // single-precision mnemonic FNEGS (cf. negD_reg_reg printing "fnegd").
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
10888 
// Double-precision negate: dst = -src via FNEGD.
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
10902 
// Single-precision absolute value: dst = |src| via FABSS.
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
10915 
// Double-precision absolute value: dst = |src| via FABSD.
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
10928 
// Double-precision square root: dst = sqrt(src) via FSQRTD.
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
10941 
// Single-precision square root.  The ideal graph has no SqrtF node,
// so we match the widen-sqrt-narrow idiom (ConvD2F (SqrtD (ConvF2D x)))
// and collapse it into a single FSQRTS.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
10954 
10955 // ============================================================================
10956 // Logical Instructions
10957 
10958 // Integer Logical Instructions
10959 
10960 // And Instructions
10961 
10962 
// Int bitwise AND, register-register: dst = src1 & src2 via ANDW.
// NOTE(review): cr is declared but appears in neither the match rule nor
// an effect(); the orI/xorI siblings omit it — confirm whether it can go.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10977 
// Int bitwise AND with a logical immediate: dst = src1 & src2 via ANDW.
// Fixed: format previously printed "andsw" (the flag-setting variant),
// but the encoding emits plain ANDW, which does not set flags.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
10992 
10993 // Or Instructions
10994 
// Int bitwise OR, register-register: dst = src1 | src2 via ORRW.
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
11009 
// Int bitwise OR with a logical immediate: dst = src1 | src2 via ORRW.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
11024 
11025 // Xor Instructions
11026 
// Int bitwise XOR, register-register: dst = src1 ^ src2 via EORW.
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
11041 
// Int bitwise XOR with a logical immediate: dst = src1 ^ src2 via EORW.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
11056 
11057 // Long Logical Instructions
11058 // TODO
11059 
// Long bitwise AND, register-register: dst = src1 & src2 via AND.
// Fixed: format comment previously said "# int" for this long operation.
// NOTE(review): cr is declared but unused in match/effect — confirm removal.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
11074 
// Long bitwise AND with a logical immediate: dst = src1 & src2 via AND.
// Fixed: format comment previously said "# int" for this long operation.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
11089 
11090 // Or Instructions
11091 
// Long bitwise OR, register-register: dst = src1 | src2 via ORR.
// Fixed: format comment previously said "# int" for this long operation.
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
11106 
// Long bitwise OR with a logical immediate: dst = src1 | src2 via ORR.
// Fixed: format comment previously said "# int" for this long operation.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
11121 
11122 // Xor Instructions
11123 
// Long bitwise XOR, register-register: dst = src1 ^ src2 via EOR.
// Fixed: format comment previously said "# int" for this long operation.
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
11138 
// Long bitwise XOR with a logical immediate: dst = src1 ^ src2 via EOR.
// Fixed: format comment previously said "# int" for this long operation;
// also reordered format/ins_cost to match every sibling rule.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
11153 
// Sign-extend int to long: SBFM with imms=31 is the SXTW alias.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
11165 
// Zero-extend int to long: (ConvI2L src) & 0xFFFFFFFF collapses to a
// single UBFM (UXTW).  This pattern occurs in BigInteger-style arithmetic.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
11179 
// Truncate long to int: a 32-bit MOVW copy discards the upper 32 bits.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
11192 
// Int to boolean: dst = (src != 0) ? 1 : 0, via CMPW + CSET.
// Clobbers the condition flags, hence KILL cr.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
11210 
// Pointer to boolean: dst = (src != NULL) ? 1 : 0, via 64-bit CMP + CSET.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
11228 
// Narrow double to float via FCVT (double-source form).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11241 
// Widen float to double via FCVT (single-source form); always exact.
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11254 
// Float to int: FCVTZS (32-bit), round toward zero as Java requires.
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11267 
// Float to long: FCVTZS (64-bit), round toward zero.
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11280 
// Int to float: signed convert via SCVTF (32-bit source, single dest).
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11293 
// Long to float: signed convert via SCVTF (64-bit source, single dest).
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11306 
// Double to int: FCVTZS (double source, 32-bit dest), round toward zero.
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11319 
// Double to long: FCVTZS (double source, 64-bit dest), round toward zero.
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11332 
// Int to double: signed convert via SCVTF (32-bit source, double dest).
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11345 
// Long to double: signed convert via SCVTF (64-bit source, double dest).
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11358 
11359 // stack <-> reg and reg <-> reg shuffles with no conversion
11360 
// Raw bit move, float stack slot -> int register (no value conversion):
// 32-bit integer load from the spill slot at sp + disp.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}
11378 
// Raw bit move, int stack slot -> float register: 32-bit FP load.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
11396 
// Raw bit move, double stack slot -> long register: 64-bit integer load.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}
11414 
// Raw bit move, long stack slot -> double register: 64-bit FP load.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
11432 
// Raw bit move, float register -> int stack slot: 32-bit FP store.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
11450 
// Raw bit move, int register -> float stack slot: 32-bit integer store.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
11468 
// Raw bit move, double register -> long stack slot: 64-bit FP store.
// Fixed: format previously printed "strd $dst, $src", reversing the
// operands relative to the emitted store and to every sibling rule.
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
11486 
// Raw bit move, long register -> double stack slot: 64-bit integer store.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
11504 
// Raw bit move, float register -> int register, via FMOV (no memory).
// NOTE(review): ins_pipe is pipe_class_memory even though no memory is
// touched — presumably a conservative scheduling choice; confirm.
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_memory);

%}
11522 
// Raw bit move, int register -> float register, via FMOV.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(pipe_class_memory);

%}
11540 
// Raw bit move, double register -> long register, via FMOV (64-bit).
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_memory);

%}
11558 
// Raw bit move, long register -> double register, via FMOV (64-bit).
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(pipe_class_memory);

%}
11576 
11577 // ============================================================================
11578 // clearing of an array
11579 
// Zero-fill an array: cnt words starting at base.  Uses fixed registers
// R11 (count) and R10 (base), both clobbered (USE_KILL).
// NOTE(review): cr is declared but not listed in effect() — confirm
// whether the encoding clobbers flags and cr should be KILLed.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode(aarch64_enc_clear_array_reg_reg(cnt, base));

  ins_pipe(pipe_class_memory);
%}
11592 
11593 // ============================================================================
11594 // Overflow Math Instructions
11595 
// Overflow check for int add: CMNW (adds, discarding the result) sets
// V on signed overflow of op1 + op2.
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
11608 
// Overflow check for int add with an add/sub-encodable immediate.
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
11621 
// Overflow check for long add: 64-bit CMN sets V on signed overflow.
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
11634 
// Overflow check for long add with an add/sub-encodable immediate.
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
11647 
// Overflow check for int subtract: CMPW sets V on signed overflow.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
11660 
// Overflow check for int subtract with an add/sub-encodable immediate.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
11673 
// Overflow check for long subtract: 64-bit CMP sets V on signed overflow.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
11686 
// Overflow check for long subtract with an add/sub-encodable immediate.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
11699 
// Overflow check for int negate (0 - op1): overflows only for MIN_VALUE.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
11712 
// Overflow check for long negate (0 - op1): overflows only for MIN_VALUE.
// NOTE(review): the zero operand is immI0 (int) in this long rule —
// presumably intentional since only the constant value 0 matters; confirm.
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
11725 
// Overflow check for int multiply.  SMULL produces the exact 64-bit
// product; it overflows 32 bits iff the top half is not the sign
// extension of the bottom half (NE after the sxtw compare).  The
// movw/cselw/cmpw sequence then converts that NE into the V flag so a
// following branch can test overflow/no_overflow directly.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
11746 
// Fused int-multiply-overflow + branch: avoids materializing the V flag
// by branching directly on the NE/EQ result of the sign-extension check.
// Only valid when the If tests overflow/no_overflow (see predicate).
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
11768 
// Overflow check for long multiply.  MUL gives result bits 0..63 and
// SMULH bits 64..127; the product fits in 64 bits iff the high half
// equals the sign extension of the low half, i.e. rscratch1 ASR #63.
// Fixed: the check previously used ASR #31, which compares against the
// wrong sign-replication and misclassifies in-range 64-bit products.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
11791 
// Fused long-multiply-overflow + branch (see overflowMulL_reg): branches
// directly on the NE/EQ result of the sign-extension check.
// Fixed: the check previously used ASR #31; the correct sign-replication
// shift for a 64-bit low half is ASR #63.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
11815 
11816 // ============================================================================
11817 // Compare Instructions
11818 
// Signed int compare, register-register: CMPW sets flags for CmpI users.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
11832 
// Signed int compare against constant zero (common case, own rule so
// the matcher prefers the immediate form).
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
11846 
// Signed int compare against an immediate that fits the add/sub
// immediate encoding (12-bit, optionally shifted): single instruction.
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
11860 
// Signed int compare against an arbitrary immediate; may need to
// materialize the constant first, hence the doubled cost.
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
11874 
11875 // Unsigned compare Instructions; really, same as signed compare
11876 // except it should only be used to feed an If or a CMovI which takes a
11877 // cmpOpU.
11878 
// Unsigned int register-register compare: the same cmpw instruction,
// but produces rFlagsRegU so consumers use unsigned condition codes.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
11892 
// Unsigned int compare against constant zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
11906 
// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
11920 
// Unsigned int compare against an arbitrary immediate (constant may
// need materializing, hence double cost).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
11934 
// Signed long register-register compare (64-bit cmp).
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
11948 
// Signed long compare against zero.  The "tst" in the format is
// shorthand: the encoding actually emits a cmp-with-#0 (subs zr),
// which sets the same EQ/NE flags.
instruct compL_reg_immI0(rFlagsReg cr, iRegL op1, immI0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
11962 
// Signed long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
11976 
// Signed long compare against an arbitrary immediate (constant may
// need materializing, hence double cost).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
11990 
// Pointer compare: pointers compare unsigned, hence rFlagsRegU.
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
12004 
// Compressed-oop (narrow pointer) compare; unsigned semantics.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
12018 
// Pointer null test (compare against the NULL constant).
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}
12032 
// Compressed-oop null test (compare against narrow NULL).
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
12046 
12047 // FP comparisons
12048 //
12049 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
12050 // using normal cmpOp. See declaration of rFlagsReg for details.
12051 
// Float compare: fcmps sets the integer NZCV flags directly, so the
// result feeds the normal (signed) cmpOp consumers.
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
12065 
// Float compare against constant 0.0 using the fcmp-with-zero form
// (no register needed for the constant).
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    // Plain 0.0 literal: the previous 0.0D used a non-standard C++
    // suffix (a GNU decimal-float extension) rejected by some compilers.
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
12079 // FROM HERE
12080 
// Double compare: fcmpd sets the integer NZCV flags directly.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
12094 
// Double compare against constant 0.0 using the fcmp-with-zero form.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    // Plain 0.0 literal: the previous 0.0D used a non-standard C++
    // suffix (a GNU decimal-float extension) rejected by some compilers.
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
12108 
// Three-way float compare (CmpF3): dst = -1 if src1 < src2 or
// unordered (NaN), 0 if equal, +1 if greater -- matching Java
// Float.compare semantics where NaN handling falls out as -1 here.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (No branch is needed: the two conditional selects cover all cases.
    //  A vestigial unused Label/bind has been removed.)
  %}

  ins_pipe(pipe_class_default);

%}
12136 
// Three-way double compare (CmpD3): dst = -1 / 0 / +1 for
// less-or-unordered / equal / greater, via two conditional selects.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (Unused Label/bind removed: branch-free sequence.)
  %}
  ins_pipe(pipe_class_default);

%}
12163 
// Three-way float compare against constant 0.0: dst = -1 / 0 / +1.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    // Plain 0.0 literal (0.0D is a non-standard C++ suffix).
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (Unused Label/bind removed: branch-free sequence.)
  %}

  ins_pipe(pipe_class_default);

%}
12190 
// Three-way double compare against constant 0.0: dst = -1 / 0 / +1.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    // Plain 0.0 literal (0.0D is a non-standard C++ suffix).
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (Unused Label/bind removed: branch-free sequence.)
  %}
  ins_pipe(pipe_class_default);

%}
12216 
// CmpLTMask: dst = (p < q) ? -1 : 0 (all-ones mask on less-than).
// cset produces 0/1; subtracting from zr turns 1 into -1.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12237 
// CmpLTMask against zero: the sign bit replicated by an arithmetic
// shift right of 31 gives -1 for negative src, 0 otherwise.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
12253 
12254 // ============================================================================
12255 // Max and Min
12256 
// Signed int minimum via compare + conditional select (branch-free).
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 lt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    // Select src1 when src1 < src2 (signed), else src2.
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
12281 // FROM HERE
12282 
// Signed int maximum via compare + conditional select (branch-free).
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 gt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    // Select src1 when src1 > src2 (signed), else src2.
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
12307 
12308 // ============================================================================
12309 // Branch Instructions
12310 
12311 // Direct Branch.
// Unconditional direct branch (Goto).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
12325 
12326 // Conditional Near Branch
// Conditional branch on signed condition codes (b.cond).
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
12346 
12347 // Conditional Near Branch Unsigned
// Conditional branch on unsigned condition codes.
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
12367 
12368 // Make use of CBZ and CBNZ.  These instructions, as well as being
12369 // shorter than (cmp; branch), have the additional benefit of not
12370 // killing the flags.
12371 
// Fuse (CmpI reg, 0) + eq/ne branch into a single cbzw/cbnzw.
// Predicate restricts to eq/ne since CBZ/CBNZ only test for zero.
// Does not touch the flags (cr is listed but not modified).
instruct cmpI_imm0_branch(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
12390 
// Fuse (CmpL reg, 0) + eq/ne branch into a single cbz/cbnz (64-bit).
instruct cmpL_imm0_branch(cmpOp cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
12409 
// Fuse pointer null-check + eq/ne branch into a single cbz/cbnz.
instruct cmpP_imm0_branch(cmpOp cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
12428 
12429 // Conditional Far Branch
12430 // Conditional Far Branch Unsigned
12431 // TODO: fixme
12432 
12433 // counted loop end branch near
// Back-branch at the bottom of a counted loop (signed condition).
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
12449 
12450 // counted loop end branch near Unsigned
// Back-branch at the bottom of a counted loop (unsigned condition).
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
12466 
12467 // counted loop end branch far
12468 // counted loop end branch far unsigned
12469 // TODO: fixme
12470 
12471 // ============================================================================
12472 // inlined locking and unlocking
12473 
// Inline fast-path monitor enter; leaves success/failure in the flags
// for the following branch (failure falls through to the slow path).
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
12488 
// Inline fast-path monitor exit; result communicated via the flags.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
12501 
12502 
12503 // ============================================================================
12504 // Safepoint Instructions
12505 
12506 // TODO
12507 // provide a near and far version of this code
12508 
// Safepoint poll: load from the polling page; the VM makes the page
// unreadable to trap threads at a safepoint.
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
12521 
12522 
12523 // ============================================================================
12524 // Procedure Call/Return Instructions
12525 
12526 // Call Java Static Instruction
12527 
// Direct call to a statically-bound Java method (non-method-handle
// case; the MH variant below handles the rest).
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  predicate(!((CallStaticJavaNode*)n)->is_method_handle_invoke());

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
12545 
12546 // TO HERE
12547 
12548 // Call Java Static Instruction (method handle version)
12549 
// Direct static Java call for method-handle invokes; reserves the
// FP-based register operand for MH-specific calling convention.
instruct CallStaticJavaDirectHandle(method meth, iRegP_FP reg_mh_save)
%{
  match(CallStaticJava);

  effect(USE meth);

  predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// (methodhandle) ==> " %}

  ins_encode( aarch64_enc_java_handle_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
12567 
12568 // Call Java Dynamic Instruction
// Dynamically-dispatched (virtual/interface) Java call through the
// inline cache.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
12584 
12585 // Call Runtime Instruction
12586 
// Call into the VM runtime (may take a safepoint).
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
12601 
12602 // Call Runtime Instruction
12603 
// Leaf runtime call: no safepoint, no Java-visible side effects.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
12618 
12619 // Call Runtime Instruction
12620 
// Leaf runtime call that does not use/kill FP registers.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
12635 
12636 // Tail Call; Jump from runtime stub to Java code.
12637 // Also known as an 'interprocedural jump'.
12638 // Target of jump will eventually return to caller.
12639 // TailJump below removes the return address.
// Indirect tail call (interprocedural jump) from a runtime stub into
// Java code; the method oop rides along in the inline-cache register.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
12652 
// Indirect tail jump used for exception forwarding; r0 carries the
// exception oop and the return address is discarded.
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
12665 
12666 // Create exception oop: created by stack-crawling runtime code.
12667 // Created exception is now available to this handler, and is setup
12668 // just prior to jumping to this handler. No code emitted.
12669 // TODO check
12670 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Zero-length pseudo-instruction: declares that the exception oop is
// already live in r0 when the handler is entered (set up by the
// stack-crawling runtime).  Emits no code.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
12683 
12684 // Rethrow exception: The exception oop will come in the first
12685 // argument position. Then JUMP (not call) to the rethrow stub code.
// Rethrow: exception oop arrives in the first argument register;
// jump (not call) to the shared rethrow stub.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
12696 
12697 
12698 // Return Instruction
12699 // epilog node loads ret address into lr as part of frame pop
// Method return; lr was restored by the epilog frame pop.
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
12710 
12711 // Die now.
// Halt: trap immediately via a breakpoint instruction with a
// distinctive immediate (999) to aid debugging.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // TODO
    // implement proper trap call here
    __ brk(999);
  %}

  ins_pipe(pipe_class_default);
%}
12726 
12727 // ============================================================================
12728 // Partial Subtype Check
12729 //
12730 // superklass array for an instance of the superklass.  Set a hidden
12731 // internal cache on a hit (cache is checked with exposed code in
12732 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
12733 // encoding ALSO sets flags.
12734 
// Slow-path subtype check over the secondary-supers array; fixed
// registers match the stub's calling convention.  opcode(0x1) tells
// the encoding to zero the result register on a hit.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
12749 
// Variant matched when the subtype-check result is immediately
// compared against null: only the flags are consumed, so the result
// register need not be zeroed (opcode 0x0).
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
12764 
// String.compareTo intrinsic; fixed registers match the
// MacroAssembler::string_compare helper's convention.
instruct string_compare(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
12779 
// String.indexOf intrinsic, variable needle length (-1 passed to the
// helper means "length is in a register, not a compile-time constant").
instruct string_indexof(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
12797 
// String.indexOf intrinsic specialized for a small constant needle
// length (<= 4); the constant is passed to the helper and the count
// register slot is fed zr.
instruct string_indexof_con(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegI tmp1, iRegI tmp2,
                 iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
12817 
// String.equals intrinsic (content comparison of two char sequences
// of equal, known length).
instruct string_equals(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, iRegP_R10 tmp, rFlagsReg cr)
%{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(KILL tmp, USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result    // KILL $tmp" %}
  ins_encode %{
    __ string_equals($str1$$Register, $str2$$Register,
                      $cnt$$Register, $result$$Register,
                      $tmp$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
12832 
// Arrays.equals intrinsic for char arrays (length check included in
// the helper).
instruct array_equals(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ char_arrays_equals($ary1$$Register, $ary2$$Register,
                          $result$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
12846 
12847 // encode char[] to byte[] in ISO_8859_1
// ISO-8859-1 encoder intrinsic: narrows char[] to byte[] using SIMD
// temporaries; result is the number of characters encoded.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
12865 
12866 // ============================================================================
12867 // This name is KNOWN by the ADLC and cannot be changed.
12868 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
12869 // for this guy.
// ThreadLocal: the current thread already lives in the dedicated
// thread register, so this emits no code (size 0, cost 0).
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
12884 
12885 
12886 
12887 //----------PEEPHOLE RULES-----------------------------------------------------
12888 // These must follow all instruction definitions as they use the names
12889 // defined in the instructions definitions.
12890 //
12891 // peepmatch ( root_instr_name [preceding_instruction]* );
12892 //
12893 // peepconstraint %{
12894 // (instruction_number.operand_name relational_op instruction_number.operand_name
12895 //  [, ...] );
12896 // // instruction numbers are zero-based using left to right order in peepmatch
12897 //
12898 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
12899 // // provide an instruction_number.operand_name for each operand that appears
12900 // // in the replacement instruction's match rule
12901 //
12902 // ---------VM FLAGS---------------------------------------------------------
12903 //
12904 // All peephole optimizations can be turned off using -XX:-OptoPeephole
12905 //
12906 // Each peephole rule is given an identifying number starting with zero and
12907 // increasing by one in the order seen by the parser.  An individual peephole
12908 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
12909 // on the command-line.
12910 //
12911 // ---------CURRENT LIMITATIONS----------------------------------------------
12912 //
12913 // Only match adjacent instructions in same basic block
12914 // Only equality constraints
12915 // Only constraints between operands, not (0.dest_reg == RAX_enc)
12916 // Only one replacement instruction
12917 //
12918 // ---------EXAMPLE----------------------------------------------------------
12919 //
12920 // // pertinent parts of existing instructions in architecture description
12921 // instruct movI(iRegINoSp dst, iRegI src)
12922 // %{
12923 //   match(Set dst (CopyI src));
12924 // %}
12925 //
12926 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
12927 // %{
12928 //   match(Set dst (AddI dst src));
12929 //   effect(KILL cr);
12930 // %}
12931 //
12932 // // Change (inc mov) to lea
12933 // peephole %{
//   // increment preceded by register-register move
12935 //   peepmatch ( incI_iReg movI );
12936 //   // require that the destination register of the increment
12937 //   // match the destination register of the move
12938 //   peepconstraint ( 0.dst == 1.dst );
12939 //   // construct a replacement instruction that sets
12940 //   // the destination to ( move's source register + one )
12941 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
12942 // %}
12943 //
12944 
12945 // Implementation no longer uses movX instructions since
12946 // machine-independent system no longer uses CopyX nodes.
12947 //
12948 // peephole
12949 // %{
12950 //   peepmatch (incI_iReg movI);
12951 //   peepconstraint (0.dst == 1.dst);
12952 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
12953 // %}
12954 
12955 // peephole
12956 // %{
12957 //   peepmatch (decI_iReg movI);
12958 //   peepconstraint (0.dst == 1.dst);
12959 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
12960 // %}
12961 
12962 // peephole
12963 // %{
12964 //   peepmatch (addI_iReg_imm movI);
12965 //   peepconstraint (0.dst == 1.dst);
12966 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
12967 // %}
12968 
12969 // peephole
12970 // %{
12971 //   peepmatch (incL_iReg movL);
12972 //   peepconstraint (0.dst == 1.dst);
12973 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
12974 // %}
12975 
12976 // peephole
12977 // %{
12978 //   peepmatch (decL_iReg movL);
12979 //   peepconstraint (0.dst == 1.dst);
12980 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
12981 // %}
12982 
12983 // peephole
12984 // %{
12985 //   peepmatch (addL_iReg_imm movL);
12986 //   peepconstraint (0.dst == 1.dst);
12987 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
12988 // %}
12989 
12990 // peephole
12991 // %{
12992 //   peepmatch (addP_iReg_imm movP);
12993 //   peepconstraint (0.dst == 1.dst);
12994 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
12995 // %}
12996 
12997 // // Change load of spilled value to only a spill
12998 // instruct storeI(memory mem, iRegI src)
12999 // %{
13000 //   match(Set mem (StoreI mem src));
13001 // %}
13002 //
13003 // instruct loadI(iRegINoSp dst, memory mem)
13004 // %{
13005 //   match(Set dst (LoadI mem));
13006 // %}
13007 //
13008 
13009 //----------SMARTSPILL RULES---------------------------------------------------
13010 // These must follow all instruction definitions as they use the names
13011 // defined in the instructions definitions.
13012 
13013 // Local Variables:
13014 // mode: c++
13015 // End: