1 //
    2 // Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
    3 // Copyright (c) 2014, 2020, Red Hat, Inc. All rights reserved.
    4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    5 //
    6 // This code is free software; you can redistribute it and/or modify it
    7 // under the terms of the GNU General Public License version 2 only, as
    8 // published by the Free Software Foundation.
    9 //
   10 // This code is distributed in the hope that it will be useful, but WITHOUT
   11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   13 // version 2 for more details (a copy is included in the LICENSE file that
   14 // accompanied this code).
   15 //
   16 // You should have received a copy of the GNU General Public License version
   17 // 2 along with this work; if not, write to the Free Software Foundation,
   18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   19 //
   20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   21 // or visit www.oracle.com if you need additional information or have any
   22 // questions.
   23 //
   24 //
   25 
   26 // AArch64 Architecture Description File
   27 
   28 //----------REGISTER DEFINITION BLOCK------------------------------------------
   29 // This information is used by the matcher and the register allocator to
   30 // describe individual registers and classes of registers within the target
    31 // architecture.
   32 
   33 register %{
   34 //----------Architecture Description Register Definitions----------------------
   35 // General Registers
   36 // "reg_def"  name ( register save type, C convention save type,
   37 //                   ideal register type, encoding );
   38 // Register Save Types:
   39 //
   40 // NS  = No-Save:       The register allocator assumes that these registers
   41 //                      can be used without saving upon entry to the method, &
   42 //                      that they do not need to be saved at call sites.
   43 //
   44 // SOC = Save-On-Call:  The register allocator assumes that these registers
   45 //                      can be used without saving upon entry to the method,
   46 //                      but that they must be saved at call sites.
   47 //
   48 // SOE = Save-On-Entry: The register allocator assumes that these registers
   49 //                      must be saved before using them upon entry to the
   50 //                      method, but they do not need to be saved at call
   51 //                      sites.
   52 //
   53 // AS  = Always-Save:   The register allocator assumes that these registers
   54 //                      must be saved before using them upon entry to the
   55 //                      method, & that they must be saved at call sites.
   56 //
   57 // Ideal Register Type is used to determine how to save & restore a
   58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
   59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
   60 //
   61 // The encoding number is the actual bit-pattern placed into the opcodes.
   62 
   63 // We must define the 64 bit int registers in two 32 bit halves, the
   64 // real lower register and a virtual upper half register. upper halves
   65 // are used by the register allocator but are not actually supplied as
   66 // operands to memory ops.
   67 //
   68 // follow the C1 compiler in making registers
   69 //
   70 //   r0-r7,r10-r26 volatile (caller save)
    71 //   r27-r31 system (no save, no allocate)
   72 //   r8-r9 non-allocatable (so we can use them as scratch regs)
   73 //
   74 // as regards Java usage. we don't use any callee save registers
   75 // because this makes it difficult to de-optimise a frame (see comment
   76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
   77 //
   78 
   79 // General Registers
   80 
   81 reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
   82 reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
   83 reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
   84 reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
   85 reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
   86 reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
   87 reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
   88 reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
   89 reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
   90 reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
   91 reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
   92 reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
   93 reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
   94 reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
   95 reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
   96 reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
   97 reg_def R8      ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()         ); // rscratch1, non-allocatable
   98 reg_def R8_H    ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()->next() );
   99 reg_def R9      ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()         ); // rscratch2, non-allocatable
  100 reg_def R9_H    ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()->next() );
  101 reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
  102 reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
  103 reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
  104 reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
  105 reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
  106 reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
  107 reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
  108 reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
  109 reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
  110 reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
  111 reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
  112 reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
  113 reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
  114 reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
  115 reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
  116 reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
  117 reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
  118 reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
  119 reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
  120 reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
  121 reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
  122 reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
  123 reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
  124 reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
  125 reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
  126 reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
  127 reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
  128 reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
  129 reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
  130 reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
  131 reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
  132 reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
  133 reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
  134 reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
  135 reg_def R27     ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
  136 reg_def R27_H   ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()->next());
  137 reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
  138 reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
  139 reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
  140 reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
  141 reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
  142 reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
  143 reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
  144 reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
  145 
  146 // ----------------------------
  147 // Float/Double/Vector Registers
  148 // ----------------------------
  149 
  150 // Double Registers
  151 
  152 // The rules of ADL require that double registers be defined in pairs.
  153 // Each pair must be two 32-bit values, but not necessarily a pair of
  154 // single float registers. In each pair, ADLC-assigned register numbers
  155 // must be adjacent, with the lower number even. Finally, when the
  156 // CPU stores such a register pair to memory, the word associated with
  157 // the lower ADLC-assigned number must be stored to the lower address.
  158 
  159 // AArch64 has 32 floating-point registers. Each can store a vector of
  160 // single or double precision floating-point values up to 8 * 32
  161 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
  162 // use the first float or double element of the vector.
  163 
   164 // For Java use, float registers v0-v15 are always save-on-call, even
   165 // though the platform ABI treats v8-v15 as callee-save. Float registers
   166 // v16-v31 are SOC as per the platform spec.
  167 
   168 // For SVE vector registers, we simply extend vector register size to 8
   169 // slots. A vector register with the lower 4 slots denotes a 128-bit
   170 // NEON vector register, while a vector register with the whole 8 slots
   171 // denotes an SVE scalable vector register with a vector size >= 128
   172 // bits (128 ~ 2048 bits, a multiple of 128 bits). A 128-bit SVE vector
   173 // register also has 8 slots, but the actual size is 128 bits, the
   174 // same as a NEON vector register. Since the real SVE vector register
   175 // size can be detected during JIT compilation, the register allocator
   176 // is able to do the right thing with the real register size, e.g. for
   177 // spilling/unspilling.
  178 
  179   reg_def V0   ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()          );
  180   reg_def V0_H ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next()  );
  181   reg_def V0_J ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(2) );
  182   reg_def V0_K ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(3) );
  183   reg_def V0_L ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(4) );
  184   reg_def V0_M ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(5) );
  185   reg_def V0_N ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(6) );
  186   reg_def V0_O ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(7) );
  187 
  188   reg_def V1   ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()          );
  189   reg_def V1_H ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next()  );
  190   reg_def V1_J ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(2) );
  191   reg_def V1_K ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(3) );
  192   reg_def V1_L ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(4) );
  193   reg_def V1_M ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(5) );
  194   reg_def V1_N ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(6) );
  195   reg_def V1_O ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(7) );
  196 
  197   reg_def V2   ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()          );
  198   reg_def V2_H ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next()  );
  199   reg_def V2_J ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(2) );
  200   reg_def V2_K ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(3) );
  201   reg_def V2_L ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(4) );
  202   reg_def V2_M ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(5) );
  203   reg_def V2_N ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(6) );
  204   reg_def V2_O ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(7) );
  205 
  206   reg_def V3   ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()          );
  207   reg_def V3_H ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next()  );
  208   reg_def V3_J ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(2) );
  209   reg_def V3_K ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(3) );
  210   reg_def V3_L ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(4) );
  211   reg_def V3_M ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(5) );
  212   reg_def V3_N ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(6) );
  213   reg_def V3_O ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(7) );
  214 
  215   reg_def V4   ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()          );
  216   reg_def V4_H ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next()  );
  217   reg_def V4_J ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(2) );
  218   reg_def V4_K ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(3) );
  219   reg_def V4_L ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(4) );
  220   reg_def V4_M ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(5) );
  221   reg_def V4_N ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(6) );
  222   reg_def V4_O ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(7) );
  223 
  224   reg_def V5   ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()          );
  225   reg_def V5_H ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next()  );
  226   reg_def V5_J ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(2) );
  227   reg_def V5_K ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(3) );
  228   reg_def V5_L ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(4) );
  229   reg_def V5_M ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(5) );
  230   reg_def V5_N ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(6) );
  231   reg_def V5_O ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(7) );
  232 
  233   reg_def V6   ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()          );
  234   reg_def V6_H ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next()  );
  235   reg_def V6_J ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(2) );
  236   reg_def V6_K ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(3) );
  237   reg_def V6_L ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(4) );
  238   reg_def V6_M ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(5) );
  239   reg_def V6_N ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(6) );
  240   reg_def V6_O ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(7) );
  241 
  242   reg_def V7   ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()          );
  243   reg_def V7_H ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next()  );
  244   reg_def V7_J ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(2) );
  245   reg_def V7_K ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(3) );
  246   reg_def V7_L ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(4) );
  247   reg_def V7_M ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(5) );
  248   reg_def V7_N ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(6) );
  249   reg_def V7_O ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(7) );
  250 
  251   reg_def V8   ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()          );
  252   reg_def V8_H ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next()  );
  253   reg_def V8_J ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(2) );
  254   reg_def V8_K ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(3) );
  255   reg_def V8_L ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(4) );
  256   reg_def V8_M ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(5) );
  257   reg_def V8_N ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(6) );
  258   reg_def V8_O ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(7) );
  259 
  260   reg_def V9   ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()          );
  261   reg_def V9_H ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next()  );
  262   reg_def V9_J ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(2) );
  263   reg_def V9_K ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(3) );
  264   reg_def V9_L ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(4) );
  265   reg_def V9_M ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(5) );
  266   reg_def V9_N ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(6) );
  267   reg_def V9_O ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(7) );
  268 
  269   reg_def V10   ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()          );
  270   reg_def V10_H ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next()  );
  271   reg_def V10_J ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2) );
  272   reg_def V10_K ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3) );
  273   reg_def V10_L ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(4) );
  274   reg_def V10_M ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(5) );
  275   reg_def V10_N ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(6) );
  276   reg_def V10_O ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(7) );
  277 
  278   reg_def V11   ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()          );
  279   reg_def V11_H ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next()  );
  280   reg_def V11_J ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2) );
  281   reg_def V11_K ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3) );
  282   reg_def V11_L ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(4) );
  283   reg_def V11_M ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(5) );
  284   reg_def V11_N ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(6) );
  285   reg_def V11_O ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(7) );
  286 
  287   reg_def V12   ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()          );
  288   reg_def V12_H ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next()  );
  289   reg_def V12_J ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2) );
  290   reg_def V12_K ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3) );
  291   reg_def V12_L ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(4) );
  292   reg_def V12_M ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(5) );
  293   reg_def V12_N ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(6) );
  294   reg_def V12_O ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(7) );
  295 
  296   reg_def V13   ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()          );
  297   reg_def V13_H ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next()  );
  298   reg_def V13_J ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2) );
  299   reg_def V13_K ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3) );
  300   reg_def V13_L ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(4) );
  301   reg_def V13_M ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(5) );
  302   reg_def V13_N ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(6) );
  303   reg_def V13_O ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(7) );
  304 
  305   reg_def V14   ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()          );
  306   reg_def V14_H ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next()  );
  307   reg_def V14_J ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2) );
  308   reg_def V14_K ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3) );
  309   reg_def V14_L ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(4) );
  310   reg_def V14_M ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(5) );
  311   reg_def V14_N ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(6) );
  312   reg_def V14_O ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(7) );
  313 
  314   reg_def V15   ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()          );
  315   reg_def V15_H ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next()  );
  316   reg_def V15_J ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2) );
  317   reg_def V15_K ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3) );
  318   reg_def V15_L ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(4) );
  319   reg_def V15_M ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(5) );
  320   reg_def V15_N ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(6) );
  321   reg_def V15_O ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(7) );
  322 
  323   reg_def V16   ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()          );
  324   reg_def V16_H ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next()  );
  325   reg_def V16_J ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2) );
  326   reg_def V16_K ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3) );
  327   reg_def V16_L ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(4) );
  328   reg_def V16_M ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(5) );
  329   reg_def V16_N ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(6) );
  330   reg_def V16_O ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(7) );
  331 
  332   reg_def V17   ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()          );
  333   reg_def V17_H ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next()  );
  334   reg_def V17_J ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2) );
  335   reg_def V17_K ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3) );
  336   reg_def V17_L ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(4) );
  337   reg_def V17_M ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(5) );
  338   reg_def V17_N ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(6) );
  339   reg_def V17_O ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(7) );
  340 
  341   reg_def V18   ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()          );
  342   reg_def V18_H ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next()  );
  343   reg_def V18_J ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2) );
  344   reg_def V18_K ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3) );
  345   reg_def V18_L ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(4) );
  346   reg_def V18_M ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(5) );
  347   reg_def V18_N ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(6) );
  348   reg_def V18_O ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(7) );
  349 
  350   reg_def V19   ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()          );
  351   reg_def V19_H ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next()  );
  352   reg_def V19_J ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2) );
  353   reg_def V19_K ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3) );
  354   reg_def V19_L ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(4) );
  355   reg_def V19_M ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(5) );
  356   reg_def V19_N ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(6) );
  357   reg_def V19_O ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(7) );
  358 
  359   reg_def V20   ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()          );
  360   reg_def V20_H ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next()  );
  361   reg_def V20_J ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2) );
  362   reg_def V20_K ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3) );
  363   reg_def V20_L ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(4) );
  364   reg_def V20_M ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(5) );
  365   reg_def V20_N ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(6) );
  366   reg_def V20_O ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(7) );
  367 
  368   reg_def V21   ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()          );
  369   reg_def V21_H ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next()  );
  370   reg_def V21_J ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2) );
  371   reg_def V21_K ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3) );
  372   reg_def V21_L ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(4) );
  373   reg_def V21_M ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(5) );
  374   reg_def V21_N ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(6) );
  375   reg_def V21_O ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(7) );
  376 
  377   reg_def V22   ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()          );
  378   reg_def V22_H ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next()  );
  379   reg_def V22_J ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2) );
  380   reg_def V22_K ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3) );
  381   reg_def V22_L ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(4) );
  382   reg_def V22_M ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(5) );
  383   reg_def V22_N ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(6) );
  384   reg_def V22_O ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(7) );
  385 
  386   reg_def V23   ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()          );
  387   reg_def V23_H ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next()  );
  388   reg_def V23_J ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2) );
  389   reg_def V23_K ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3) );
  390   reg_def V23_L ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(4) );
  391   reg_def V23_M ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(5) );
  392   reg_def V23_N ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(6) );
  393   reg_def V23_O ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(7) );
  394 
  395   reg_def V24   ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()          );
  396   reg_def V24_H ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next()  );
  397   reg_def V24_J ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2) );
  398   reg_def V24_K ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3) );
  399   reg_def V24_L ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(4) );
  400   reg_def V24_M ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(5) );
  401   reg_def V24_N ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(6) );
  402   reg_def V24_O ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(7) );
  403 
  404   reg_def V25   ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()          );
  405   reg_def V25_H ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next()  );
  406   reg_def V25_J ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2) );
  407   reg_def V25_K ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3) );
  408   reg_def V25_L ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(4) );
  409   reg_def V25_M ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(5) );
  410   reg_def V25_N ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(6) );
  411   reg_def V25_O ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(7) );
  412 
  413   reg_def V26   ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()          );
  414   reg_def V26_H ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next()  );
  415   reg_def V26_J ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2) );
  416   reg_def V26_K ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3) );
  417   reg_def V26_L ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(4) );
  418   reg_def V26_M ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(5) );
  419   reg_def V26_N ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(6) );
  420   reg_def V26_O ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(7) );
  421 
  422   reg_def V27   ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()          );
  423   reg_def V27_H ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next()  );
  424   reg_def V27_J ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2) );
  425   reg_def V27_K ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3) );
  426   reg_def V27_L ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(4) );
  427   reg_def V27_M ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(5) );
  428   reg_def V27_N ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(6) );
  429   reg_def V27_O ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(7) );
  430 
  431   reg_def V28   ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()          );
  432   reg_def V28_H ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next()  );
  433   reg_def V28_J ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2) );
  434   reg_def V28_K ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3) );
  435   reg_def V28_L ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(4) );
  436   reg_def V28_M ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(5) );
  437   reg_def V28_N ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(6) );
  438   reg_def V28_O ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(7) );
  439 
  440   reg_def V29   ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()          );
  441   reg_def V29_H ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next()  );
  442   reg_def V29_J ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2) );
  443   reg_def V29_K ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3) );
  444   reg_def V29_L ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(4) );
  445   reg_def V29_M ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(5) );
  446   reg_def V29_N ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(6) );
  447   reg_def V29_O ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(7) );
  448 
  449   reg_def V30   ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()          );
  450   reg_def V30_H ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next()  );
  451   reg_def V30_J ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2) );
  452   reg_def V30_K ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3) );
  453   reg_def V30_L ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(4) );
  454   reg_def V30_M ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(5) );
  455   reg_def V30_N ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(6) );
  456   reg_def V30_O ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(7) );
  457 
  458   reg_def V31   ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()          );
  459   reg_def V31_H ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next()  );
  460   reg_def V31_J ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2) );
  461   reg_def V31_K ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3) );
  462   reg_def V31_L ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(4) );
  463   reg_def V31_M ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(5) );
  464   reg_def V31_N ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(6) );
  465   reg_def V31_O ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(7) );
  466 
  467 
  468 // ----------------------------
  469 // SVE Predicate Registers
  470 // ----------------------------
  471   reg_def P0 (SOC, SOC, Op_RegVMask, 0, p0->as_VMReg());
  472   reg_def P1 (SOC, SOC, Op_RegVMask, 1, p1->as_VMReg());
  473   reg_def P2 (SOC, SOC, Op_RegVMask, 2, p2->as_VMReg());
  474   reg_def P3 (SOC, SOC, Op_RegVMask, 3, p3->as_VMReg());
  475   reg_def P4 (SOC, SOC, Op_RegVMask, 4, p4->as_VMReg());
  476   reg_def P5 (SOC, SOC, Op_RegVMask, 5, p5->as_VMReg());
  477   reg_def P6 (SOC, SOC, Op_RegVMask, 6, p6->as_VMReg());
  478   reg_def P7 (SOC, SOC, Op_RegVMask, 7, p7->as_VMReg());
  479   reg_def P8 (SOC, SOC, Op_RegVMask, 8, p8->as_VMReg());
  480   reg_def P9 (SOC, SOC, Op_RegVMask, 9, p9->as_VMReg());
  481   reg_def P10 (SOC, SOC, Op_RegVMask, 10, p10->as_VMReg());
  482   reg_def P11 (SOC, SOC, Op_RegVMask, 11, p11->as_VMReg());
  483   reg_def P12 (SOC, SOC, Op_RegVMask, 12, p12->as_VMReg());
  484   reg_def P13 (SOC, SOC, Op_RegVMask, 13, p13->as_VMReg());
  485   reg_def P14 (SOC, SOC, Op_RegVMask, 14, p14->as_VMReg());
  486   reg_def P15 (SOC, SOC, Op_RegVMask, 15, p15->as_VMReg());
  487 
  488 // ----------------------------
  489 // Special Registers
  490 // ----------------------------
  491 
   492 // The AArch64 CPSR status flag register is not directly accessible as
   493 // an instruction operand. The FPSR status flag register is a system
   494 // register which can be written/read using MSR/MRS but again does not
   495 // appear as an operand (a code identifying the FPSR occurs as an
   496 // immediate value in the instruction).
  497 
  498 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
  499 
  500 // Specify priority of register selection within phases of register
  501 // allocation.  Highest priority is first.  A useful heuristic is to
  502 // give registers a low priority when they are required by machine
  503 // instructions, like EAX and EDX on I486, and choose no-save registers
  504 // before save-on-call, & save-on-call before save-on-entry.  Registers
  505 // which participate in fixed calling sequences should come last.
  506 // Registers which are used as pairs must fall on an even boundary.
  507 
// 64-bit general purpose registers, listed in allocation priority order
// (highest first, per the heuristic described above): volatiles, then
// argument registers, then callee-saved registers, with the special /
// non-allocatable registers last.
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
    R8, R8_H,   // rscratch1
    R9, R9_H,   // rscratch2
);
  550 
// Vector/FP registers, each with eight 32-bit slots (V, _H, _J ... _O)
// to cover the maximum SVE vector width. Allocation priority: no-save
// registers first, then arg registers, then callee-saved.
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K, V16_L, V16_M, V16_N, V16_O,
    V17, V17_H, V17_J, V17_K, V17_L, V17_M, V17_N, V17_O,
    V18, V18_H, V18_J, V18_K, V18_L, V18_M, V18_N, V18_O,
    V19, V19_H, V19_J, V19_K, V19_L, V19_M, V19_N, V19_O,
    V20, V20_H, V20_J, V20_K, V20_L, V20_M, V20_N, V20_O,
    V21, V21_H, V21_J, V21_K, V21_L, V21_M, V21_N, V21_O,
    V22, V22_H, V22_J, V22_K, V22_L, V22_M, V22_N, V22_O,
    V23, V23_H, V23_J, V23_K, V23_L, V23_M, V23_N, V23_O,
    V24, V24_H, V24_J, V24_K, V24_L, V24_M, V24_N, V24_O,
    V25, V25_H, V25_J, V25_K, V25_L, V25_M, V25_N, V25_O,
    V26, V26_H, V26_J, V26_K, V26_L, V26_M, V26_N, V26_O,
    V27, V27_H, V27_J, V27_K, V27_L, V27_M, V27_N, V27_O,
    V28, V28_H, V28_J, V28_K, V28_L, V28_M, V28_N, V28_O,
    V29, V29_H, V29_J, V29_K, V29_L, V29_M, V29_N, V29_O,
    V30, V30_H, V30_J, V30_K, V30_L, V30_M, V30_N, V30_O,
    V31, V31_H, V31_J, V31_K, V31_L, V31_M, V31_N, V31_O,

    // arg registers
    V0, V0_H, V0_J, V0_K, V0_L, V0_M, V0_N, V0_O,
    V1, V1_H, V1_J, V1_K, V1_L, V1_M, V1_N, V1_O,
    V2, V2_H, V2_J, V2_K, V2_L, V2_M, V2_N, V2_O,
    V3, V3_H, V3_J, V3_K, V3_L, V3_M, V3_N, V3_O,
    V4, V4_H, V4_J, V4_K, V4_L, V4_M, V4_N, V4_O,
    V5, V5_H, V5_J, V5_K, V5_L, V5_M, V5_N, V5_O,
    V6, V6_H, V6_J, V6_K, V6_L, V6_M, V6_N, V6_O,
    V7, V7_H, V7_J, V7_K, V7_L, V7_M, V7_N, V7_O,

    // non-volatiles
    V8, V8_H, V8_J, V8_K, V8_L, V8_M, V8_N, V8_O,
    V9, V9_H, V9_J, V9_K, V9_L, V9_M, V9_N, V9_O,
    V10, V10_H, V10_J, V10_K, V10_L, V10_M, V10_N, V10_O,
    V11, V11_H, V11_J, V11_K, V11_L, V11_M, V11_N, V11_O,
    V12, V12_H, V12_J, V12_K, V12_L, V12_M, V12_N, V12_O,
    V13, V13_H, V13_J, V13_K, V13_L, V13_M, V13_N, V13_O,
    V14, V14_H, V14_J, V14_K, V14_L, V14_M, V14_N, V14_O,
    V15, V15_H, V15_J, V15_K, V15_L, V15_M, V15_N, V15_O,
);
  591 
// SVE predicate registers.
// NOTE(review): P0-P7 are listed ahead of P8-P15; the gov_pr class below
// draws governing predicates only from this first group.
alloc_class chunk2 (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    P7,

    P8,
    P9,
    P10,
    P11,
    P12,
    P13,
    P14,
    P15,
);

// The condition-flags pseudo register gets a chunk of its own.
alloc_class chunk3(RFLAGS);
  613 
//----------Architecture Description Register Classes--------------------------
// Several register classes are automatically defined based upon information in
// this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
//

// Class for all 32 bit general purpose registers
// n.b. R8 and R9 (rscratch1/rscratch2) are deliberately omitted.
reg_class all_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30,
    R31
);
  656 
  657 
// Class for all 32 bit integer registers (excluding SP which
// will never be used as an integer register)
// The mask is computed in reg_mask_init() (see the source block).
reg_class any_reg32 %{
  return _ANY_REG32_mask;
%}

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);

// Singleton class for R31 int register
reg_class int_r31_reg(R31);
  678 
// Class for all 64 bit general purpose registers
// n.b. R8 and R9 (rscratch1/rscratch2) are deliberately omitted.
reg_class all_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);

// Class for all long integer registers (including SP)
// The mask is computed in reg_mask_init() (see the source block).
reg_class any_reg %{
  return _ANY_REG_mask;
%}

// Class for non-allocatable 32 bit registers
reg_class non_allocatable_reg32(
    R28,                        // thread
    R30,                        // lr
    R31                         // sp
);

// Class for non-allocatable 64 bit registers
reg_class non_allocatable_reg(
    R28, R28_H,                 // thread
    R30, R30_H,                 // lr
    R31, R31_H                  // sp
);

// Class for all non-special integer registers
// Derived in reg_mask_init(): all_reg32 minus the non-allocatable set
// (and conditionally r27/r29).
reg_class no_special_reg32 %{
  return _NO_SPECIAL_REG32_mask;
%}

// Class for all non-special long integer registers
// Derived in reg_mask_init(): all_reg minus the non-allocatable set
// (and conditionally r27/r29).
reg_class no_special_reg %{
  return _NO_SPECIAL_REG_mask;
%}
  741 
// Singleton classes pinning a specific 64-bit general purpose register
// (each pairs the low slot with its _H high half).

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register
reg_class fp_reg(
    R29, R29_H
);

// Class for link register
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register
reg_class sp_reg(
  R31, R31_H
);

// Class for all pointer registers
// The mask is computed in reg_mask_init() (see the source block).
reg_class ptr_reg %{
  return _PTR_REG_mask;
%}

// Class for all non_special pointer registers
// Derived in reg_mask_init(): all registers minus the non-allocatable
// set (and conditionally r27/r29).
reg_class no_special_ptr_reg %{
  return _NO_SPECIAL_PTR_REG_mask;
%}
  821 
// Class for all float registers
// (single 32-bit slot per vector register)
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);

// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
  895 
// Class for all SVE vector registers.
// (all eight 32-bit slots V .. V_O per register)
reg_class vectora_reg (
    V0, V0_H, V0_J, V0_K, V0_L, V0_M, V0_N, V0_O,
    V1, V1_H, V1_J, V1_K, V1_L, V1_M, V1_N, V1_O,
    V2, V2_H, V2_J, V2_K, V2_L, V2_M, V2_N, V2_O,
    V3, V3_H, V3_J, V3_K, V3_L, V3_M, V3_N, V3_O,
    V4, V4_H, V4_J, V4_K, V4_L, V4_M, V4_N, V4_O,
    V5, V5_H, V5_J, V5_K, V5_L, V5_M, V5_N, V5_O,
    V6, V6_H, V6_J, V6_K, V6_L, V6_M, V6_N, V6_O,
    V7, V7_H, V7_J, V7_K, V7_L, V7_M, V7_N, V7_O,
    V8, V8_H, V8_J, V8_K, V8_L, V8_M, V8_N, V8_O,
    V9, V9_H, V9_J, V9_K, V9_L, V9_M, V9_N, V9_O,
    V10, V10_H, V10_J, V10_K, V10_L, V10_M, V10_N, V10_O,
    V11, V11_H, V11_J, V11_K, V11_L, V11_M, V11_N, V11_O,
    V12, V12_H, V12_J, V12_K, V12_L, V12_M, V12_N, V12_O,
    V13, V13_H, V13_J, V13_K, V13_L, V13_M, V13_N, V13_O,
    V14, V14_H, V14_J, V14_K, V14_L, V14_M, V14_N, V14_O,
    V15, V15_H, V15_J, V15_K, V15_L, V15_M, V15_N, V15_O,
    V16, V16_H, V16_J, V16_K, V16_L, V16_M, V16_N, V16_O,
    V17, V17_H, V17_J, V17_K, V17_L, V17_M, V17_N, V17_O,
    V18, V18_H, V18_J, V18_K, V18_L, V18_M, V18_N, V18_O,
    V19, V19_H, V19_J, V19_K, V19_L, V19_M, V19_N, V19_O,
    V20, V20_H, V20_J, V20_K, V20_L, V20_M, V20_N, V20_O,
    V21, V21_H, V21_J, V21_K, V21_L, V21_M, V21_N, V21_O,
    V22, V22_H, V22_J, V22_K, V22_L, V22_M, V22_N, V22_O,
    V23, V23_H, V23_J, V23_K, V23_L, V23_M, V23_N, V23_O,
    V24, V24_H, V24_J, V24_K, V24_L, V24_M, V24_N, V24_O,
    V25, V25_H, V25_J, V25_K, V25_L, V25_M, V25_N, V25_O,
    V26, V26_H, V26_J, V26_K, V26_L, V26_M, V26_N, V26_O,
    V27, V27_H, V27_J, V27_K, V27_L, V27_M, V27_N, V27_O,
    V28, V28_H, V28_J, V28_K, V28_L, V28_M, V28_N, V28_O,
    V29, V29_H, V29_J, V29_K, V29_L, V29_M, V29_N, V29_O,
    V30, V30_H, V30_J, V30_K, V30_L, V30_M, V30_N, V30_O,
    V31, V31_H, V31_J, V31_K, V31_L, V31_M, V31_N, V31_O,
);

// Class for all 64bit vector registers
// (two 32-bit slots per register)
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);

// Class for all 128bit vector registers
// (four 32-bit slots per register)
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 1003 
// Singleton classes pinning a specific vector register.
// NOTE(review): each class names only the V/V_H slots although the header
// comments say "128 bit" -- this mirrors the double_reg slot layout above;
// confirm against the adlc register-mask generation before changing.

// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Class for 128 bit register v4
reg_class v4_reg(
    V4, V4_H
);

// Class for 128 bit register v5
reg_class v5_reg(
    V5, V5_H
);

// Class for 128 bit register v6
reg_class v6_reg(
    V6, V6_H
);

// Class for 128 bit register v7
reg_class v7_reg(
    V7, V7_H
);

// Class for 128 bit register v8
reg_class v8_reg(
    V8, V8_H
);

// Class for 128 bit register v9
reg_class v9_reg(
    V9, V9_H
);

// Class for 128 bit register v10
reg_class v10_reg(
    V10, V10_H
);

// Class for 128 bit register v11
reg_class v11_reg(
    V11, V11_H
);

// Class for 128 bit register v12
reg_class v12_reg(
    V12, V12_H
);

// Class for 128 bit register v13
reg_class v13_reg(
    V13, V13_H
);

// Class for 128 bit register v14
reg_class v14_reg(
    V14, V14_H
);

// Class for 128 bit register v15
reg_class v15_reg(
    V15, V15_H
);

// Class for 128 bit register v16
reg_class v16_reg(
    V16, V16_H
);

// Class for 128 bit register v17
reg_class v17_reg(
    V17, V17_H
);

// Class for 128 bit register v18
reg_class v18_reg(
    V18, V18_H
);

// Class for 128 bit register v19
reg_class v19_reg(
    V19, V19_H
);

// Class for 128 bit register v20
reg_class v20_reg(
    V20, V20_H
);

// Class for 128 bit register v21
reg_class v21_reg(
    V21, V21_H
);

// Class for 128 bit register v22
reg_class v22_reg(
    V22, V22_H
);

// Class for 128 bit register v23
reg_class v23_reg(
    V23, V23_H
);

// Class for 128 bit register v24
reg_class v24_reg(
    V24, V24_H
);

// Class for 128 bit register v25
reg_class v25_reg(
    V25, V25_H
);

// Class for 128 bit register v26
reg_class v26_reg(
    V26, V26_H
);

// Class for 128 bit register v27
reg_class v27_reg(
    V27, V27_H
);

// Class for 128 bit register v28
reg_class v28_reg(
    V28, V28_H
);

// Class for 128 bit register v29
reg_class v29_reg(
    V29, V29_H
);

// Class for 128 bit register v30
reg_class v30_reg(
    V30, V30_H
);

// Class for 128 bit register v31
reg_class v31_reg(
    V31, V31_H
);
 1163 
// Class for all SVE predicate registers.
reg_class pr_reg (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    // P7, non-allocatable, preserved with all elements preset to TRUE.
    P8,
    P9,
    P10,
    P11,
    P12,
    P13,
    P14,
    P15
);

// Class for SVE governing predicate registers, which are used
// to determine the active elements of a predicated instruction.
// Only P0-P6 are allocatable here (P7 is reserved, see above).
reg_class gov_pr (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    // P7, non-allocatable, preserved with all elements preset to TRUE.
);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);
 1199 
 1200 %}
 1201 
 1202 //----------DEFINITION BLOCK---------------------------------------------------
 1203 // Define name --> value mappings to inform the ADLC of an integer valued name
 1204 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 1205 // Format:
 1206 //        int_def  <name>         ( <int_value>, <expression>);
 1207 // Generated Code in ad_<arch>.hpp
 1208 //        #define  <name>   (<expression>)
 1209 //        // value == <int_value>
 1210 // Generated code in ad_<arch>.cpp adlc_verification()
 1211 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 1212 //
 1213 
 1214 // we follow the ppc-aix port in using a simple cost model which ranks
 1215 // register operations as cheap, memory ops as more expensive and
 1216 // branches as most expensive. the first two have a low as well as a
 1217 // normal cost. huge cost appears to be a way of saying don't do
 1218 // something
 1219 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls cost twice a register move.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile memory references are an order of magnitude more expensive.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 1227 
 1228 
 1229 //----------SOURCE BLOCK-------------------------------------------------------
 1230 // This is a block of C++ code which provides values, functions, and
 1231 // definitions necessary in the rest of the architecture description
 1232 
 1233 source_hpp %{
 1234 
 1235 #include "asm/macroAssembler.hpp"
 1236 #include "gc/shared/cardTable.hpp"
 1237 #include "gc/shared/cardTableBarrierSet.hpp"
 1238 #include "gc/shared/collectedHeap.hpp"
 1239 #include "opto/addnode.hpp"
 1240 #include "opto/convertnode.hpp"
 1241 
 1242 extern RegMask _ANY_REG32_mask;
 1243 extern RegMask _ANY_REG_mask;
 1244 extern RegMask _PTR_REG_mask;
 1245 extern RegMask _NO_SPECIAL_REG32_mask;
 1246 extern RegMask _NO_SPECIAL_REG_mask;
 1247 extern RegMask _NO_SPECIAL_PTR_REG_mask;
 1248 
 1249 class CallStubImpl {
 1250 
 1251   //--------------------------------------------------------------
 1252   //---<  Used for optimization in Compile::shorten_branches  >---
 1253   //--------------------------------------------------------------
 1254 
 1255  public:
 1256   // Size of call trampoline stub.
 1257   static uint size_call_trampoline() {
 1258     return 0; // no call trampolines on this platform
 1259   }
 1260 
 1261   // number of relocations needed by a call trampoline stub
 1262   static uint reloc_call_trampoline() {
 1263     return 0; // no call trampolines on this platform
 1264   }
 1265 };
 1266 
// Platform hooks that emit the exception and deopt handler stubs and
// report their sizes so the code buffer can reserve space for them.
class HandlerImpl {

 public:

  // Emitters are defined in the source block / .cpp side.
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // The exception handler is a single far branch.
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // NOTE(review): 4 insns presumably allows for a worst-case multi-insn
    // far branch plus the adr -- confirm against far_branch_size().
    return 4 * NativeInstruction::instruction_size;
  }
};
 1283 
// Platform-dependent node flags: AArch64 adds no machine-specific flags
// beyond the shared Node::_last_flag.
class Node::PD {
public:
  enum NodeFlags {
    _last_flag = Node::_last_flag
  };
};
 1290 
 // return true if opcode is one of the possible CompareAndSwapX
 // values (defined in the source block below)
 bool is_CAS(int opcode, bool maybe_volatile);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);
 1309 %}
 1310 
 1311 source %{
 1312 
  // Derived RegMask with conditionally allocatable registers

  // No platform-specific mach node analysis is needed on AArch64.
  void PhaseOutput::pd_perform_mach_node_analysis() {
  }
 1317 
  // Machine nodes require no alignment beyond a single instruction slot.
  int MachNode::pd_alignment_required() const {
    return 1;
  }
 1321 
  // No padding is ever inserted before a machine node on this platform.
  int MachNode::compute_padding(int current_offset) const {
    return 0;
  }
 1325 
  // Definitions of the masks declared extern in the source_hpp block;
  // populated by reg_mask_init() below from the adlc-generated classes.
  RegMask _ANY_REG32_mask;
  RegMask _ANY_REG_mask;
  RegMask _PTR_REG_mask;
  RegMask _NO_SPECIAL_REG32_mask;
  RegMask _NO_SPECIAL_REG_mask;
  RegMask _NO_SPECIAL_PTR_REG_mask;
 1332 
  void reg_mask_init() {
    // We derive below RegMask(s) from the ones which are auto-generated from
    // adlc register classes to make AArch64 rheapbase (r27) and rfp (r29)
    // registers conditionally reserved.

    // any 32-bit register except SP
    _ANY_REG32_mask = _ALL_REG32_mask;
    _ANY_REG32_mask.Remove(OptoReg::as_OptoReg(r31_sp->as_VMReg()));

    // any 64-bit register, SP included
    _ANY_REG_mask = _ALL_REG_mask;

    _PTR_REG_mask = _ALL_REG_mask;

    // start from every register, then strip the non-allocatable set
    _NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
    _NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);

    _NO_SPECIAL_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    _NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    // r27 is not allocatable when compressed oops is on and heapbase is not
    // zero, compressed klass pointers doesn't use r27 after JDK-8234794
    if (UseCompressedOops && (CompressedOops::ptrs_base() != NULL || UseAOT)) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_REG_mask.SUBTRACT(_HEAPBASE_REG_mask);
      _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_HEAPBASE_REG_mask);
    }

    // r29 is not allocatable when PreserveFramePointer is on
    if (PreserveFramePointer) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_REG_mask.SUBTRACT(_FP_REG_mask);
      _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_FP_REG_mask);
    }
  }
 1369 
  // Optimization of volatile gets and puts
 1371   // -------------------------------------
 1372   //
 1373   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
 1374   // use to implement volatile reads and writes. For a volatile read
 1375   // we simply need
 1376   //
 1377   //   ldar<x>
 1378   //
 1379   // and for a volatile write we need
 1380   //
 1381   //   stlr<x>
 1382   //
 1383   // Alternatively, we can implement them by pairing a normal
 1384   // load/store with a memory barrier. For a volatile read we need
 1385   //
 1386   //   ldr<x>
 1387   //   dmb ishld
 1388   //
 1389   // for a volatile write
 1390   //
 1391   //   dmb ish
 1392   //   str<x>
 1393   //   dmb ish
 1394   //
 1395   // We can also use ldaxr and stlxr to implement compare and swap CAS
 1396   // sequences. These are normally translated to an instruction
 1397   // sequence like the following
 1398   //
 1399   //   dmb      ish
 1400   // retry:
 1401   //   ldxr<x>   rval raddr
 1402   //   cmp       rval rold
 1403   //   b.ne done
 1404   //   stlxr<x>  rval, rnew, rold
 1405   //   cbnz      rval retry
 1406   // done:
 1407   //   cset      r0, eq
 1408   //   dmb ishld
 1409   //
 1410   // Note that the exclusive store is already using an stlxr
 1411   // instruction. That is required to ensure visibility to other
 1412   // threads of the exclusive write (assuming it succeeds) before that
 1413   // of any subsequent writes.
 1414   //
 1415   // The following instruction sequence is an improvement on the above
 1416   //
 1417   // retry:
 1418   //   ldaxr<x>  rval raddr
 1419   //   cmp       rval rold
 1420   //   b.ne done
 1421   //   stlxr<x>  rval, rnew, rold
 1422   //   cbnz      rval retry
 1423   // done:
 1424   //   cset      r0, eq
 1425   //
 1426   // We don't need the leading dmb ish since the stlxr guarantees
 1427   // visibility of prior writes in the case that the swap is
 1428   // successful. Crucially we don't have to worry about the case where
 1429   // the swap is not successful since no valid program should be
 1430   // relying on visibility of prior changes by the attempting thread
 1431   // in the case where the CAS fails.
 1432   //
 1433   // Similarly, we don't need the trailing dmb ishld if we substitute
 1434   // an ldaxr instruction since that will provide all the guarantees we
 1435   // require regarding observation of changes made by other threads
 1436   // before any change to the CAS address observed by the load.
 1437   //
 1438   // In order to generate the desired instruction sequence we need to
 1439   // be able to identify specific 'signature' ideal graph node
 1440   // sequences which i) occur as a translation of a volatile reads or
 1441   // writes or CAS operations and ii) do not occur through any other
 1442   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
 1444   // sequences to the desired machine code sequences. Selection of the
 1445   // alternative rules can be implemented by predicates which identify
 1446   // the relevant node sequences.
 1447   //
 1448   // The ideal graph generator translates a volatile read to the node
 1449   // sequence
 1450   //
 1451   //   LoadX[mo_acquire]
 1452   //   MemBarAcquire
 1453   //
 1454   // As a special case when using the compressed oops optimization we
 1455   // may also see this variant
 1456   //
 1457   //   LoadN[mo_acquire]
 1458   //   DecodeN
 1459   //   MemBarAcquire
 1460   //
 1461   // A volatile write is translated to the node sequence
 1462   //
 1463   //   MemBarRelease
 1464   //   StoreX[mo_release] {CardMark}-optional
 1465   //   MemBarVolatile
 1466   //
 1467   // n.b. the above node patterns are generated with a strict
 1468   // 'signature' configuration of input and output dependencies (see
 1469   // the predicates below for exact details). The card mark may be as
 1470   // simple as a few extra nodes or, in a few GC configurations, may
 1471   // include more complex control flow between the leading and
 1472   // trailing memory barriers. However, whatever the card mark
 1473   // configuration these signatures are unique to translated volatile
 1474   // reads/stores -- they will not appear as a result of any other
 1475   // bytecode translation or inlining nor as a consequence of
 1476   // optimizing transforms.
 1477   //
 1478   // We also want to catch inlined unsafe volatile gets and puts and
 1479   // be able to implement them using either ldar<x>/stlr<x> or some
 1480   // combination of ldr<x>/stlr<x> and dmb instructions.
 1481   //
 1482   // Inlined unsafe volatiles puts manifest as a minor variant of the
 1483   // normal volatile put node sequence containing an extra cpuorder
 1484   // membar
 1485   //
 1486   //   MemBarRelease
 1487   //   MemBarCPUOrder
 1488   //   StoreX[mo_release] {CardMark}-optional
 1489   //   MemBarCPUOrder
 1490   //   MemBarVolatile
 1491   //
 1492   // n.b. as an aside, a cpuorder membar is not itself subject to
 1493   // matching and translation by adlc rules.  However, the rule
 1494   // predicates need to detect its presence in order to correctly
 1495   // select the desired adlc rules.
 1496   //
 1497   // Inlined unsafe volatile gets manifest as a slightly different
 1498   // node sequence to a normal volatile get because of the
 1499   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
  // MemBarAcquire, possibly through an optional DecodeN, is still
 1502   // present
 1503   //
 1504   //   MemBarCPUOrder
 1505   //        ||       \\
 1506   //   MemBarCPUOrder LoadX[mo_acquire]
 1507   //        ||            |
 1508   //        ||       {DecodeN} optional
 1509   //        ||       /
 1510   //     MemBarAcquire
 1511   //
 1512   // In this case the acquire membar does not directly depend on the
 1513   // load. However, we can be sure that the load is generated from an
 1514   // inlined unsafe volatile get if we see it dependent on this unique
 1515   // sequence of membar nodes. Similarly, given an acquire membar we
 1516   // can know that it was added because of an inlined unsafe volatile
 1517   // get if it is fed and feeds a cpuorder membar and if its feed
 1518   // membar also feeds an acquiring load.
 1519   //
 1520   // Finally an inlined (Unsafe) CAS operation is translated to the
 1521   // following ideal graph
 1522   //
 1523   //   MemBarRelease
 1524   //   MemBarCPUOrder
 1525   //   CompareAndSwapX {CardMark}-optional
 1526   //   MemBarCPUOrder
 1527   //   MemBarAcquire
 1528   //
 1529   // So, where we can identify these volatile read and write
 1530   // signatures we can choose to plant either of the above two code
 1531   // sequences. For a volatile read we can simply plant a normal
 1532   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
 1533   // also choose to inhibit translation of the MemBarAcquire and
 1534   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
 1535   //
 1536   // When we recognise a volatile store signature we can choose to
 1537   // plant at a dmb ish as a translation for the MemBarRelease, a
 1538   // normal str<x> and then a dmb ish for the MemBarVolatile.
 1539   // Alternatively, we can inhibit translation of the MemBarRelease
 1540   // and MemBarVolatile and instead plant a simple stlr<x>
 1541   // instruction.
 1542   //
 1543   // when we recognise a CAS signature we can choose to plant a dmb
 1544   // ish as a translation for the MemBarRelease, the conventional
 1545   // macro-instruction sequence for the CompareAndSwap node (which
 1546   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
 1547   // Alternatively, we can elide generation of the dmb instructions
 1548   // and plant the alternative CompareAndSwap macro-instruction
 1549   // sequence (which uses ldaxr<x>).
 1550   //
 1551   // Of course, the above only applies when we see these signature
 1552   // configurations. We still want to plant dmb instructions in any
 1553   // other cases where we may see a MemBarAcquire, MemBarRelease or
 1554   // MemBarVolatile. For example, at the end of a constructor which
 1555   // writes final/volatile fields we will see a MemBarRelease
 1556   // instruction and this needs a 'dmb ish' lest we risk the
 1557   // constructed object being visible without making the
 1558   // final/volatile field writes visible.
 1559   //
 1560   // n.b. the translation rules below which rely on detection of the
 1561   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
 1562   // If we see anything other than the signature configurations we
 1563   // always just translate the loads and stores to ldr<x> and str<x>
 1564   // and translate acquire, release and volatile membars to the
 1565   // relevant dmb instructions.
 1566   //
 1567 
  // is_CAS(int opcode, bool maybe_volatile)
  //
  // return true if opcode is one of the possible CompareAndSwapX
  // values otherwise false.
  //
  // Strong CAS nodes and the GetAndSet/GetAndAdd RMW nodes always
  // count as a CAS. The CompareAndExchange and weak CAS variants only
  // count when maybe_volatile is true, i.e. when the caller is asking
  // whether the node might require volatile (acquire/release)
  // semantics.

  bool is_CAS(int opcode, bool maybe_volatile)
  {
    switch(opcode) {
      // We handle these
    case Op_CompareAndSwapI:
    case Op_CompareAndSwapL:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_CompareAndSwapB:
    case Op_CompareAndSwapS:
    case Op_GetAndSetI:
    case Op_GetAndSetL:
    case Op_GetAndSetP:
    case Op_GetAndSetN:
    case Op_GetAndAddI:
    case Op_GetAndAddL:
      return true;
      // These only count as a CAS when the caller allows for a
      // possibly-volatile access (maybe_volatile == true).
    case Op_CompareAndExchangeI:
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeB:
    case Op_CompareAndExchangeS:
    case Op_CompareAndExchangeL:
    case Op_CompareAndExchangeP:
    case Op_WeakCompareAndSwapB:
    case Op_WeakCompareAndSwapS:
    case Op_WeakCompareAndSwapI:
    case Op_WeakCompareAndSwapL:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      return maybe_volatile;
    default:
      return false;
    }
  }
 1613 
 1614   // helper to determine the maximum number of Phi nodes we may need to
 1615   // traverse when searching from a card mark membar for the merge mem
 1616   // feeding a trailing membar or vice versa
 1617 
 1618 // predicates controlling emit of ldr<x>/ldar<x>
 1619 
 1620 bool unnecessary_acquire(const Node *barrier)
 1621 {
 1622   assert(barrier->is_MemBar(), "expecting a membar");
 1623 
 1624   MemBarNode* mb = barrier->as_MemBar();
 1625 
 1626   if (mb->trailing_load()) {
 1627     return true;
 1628   }
 1629 
 1630   if (mb->trailing_load_store()) {
 1631     Node* load_store = mb->in(MemBarNode::Precedent);
 1632     assert(load_store->is_LoadStore(), "unexpected graph shape");
 1633     return is_CAS(load_store->Opcode(), true);
 1634   }
 1635 
 1636   return false;
 1637 }
 1638 
 1639 bool needs_acquiring_load(const Node *n)
 1640 {
 1641   assert(n->is_Load(), "expecting a load");
 1642   LoadNode *ld = n->as_Load();
 1643   return ld->is_acquire();
 1644 }
 1645 
 1646 bool unnecessary_release(const Node *n)
 1647 {
 1648   assert((n->is_MemBar() &&
 1649           n->Opcode() == Op_MemBarRelease),
 1650          "expecting a release membar");
 1651 
 1652   MemBarNode *barrier = n->as_MemBar();
 1653   if (!barrier->leading()) {
 1654     return false;
 1655   } else {
 1656     Node* trailing = barrier->trailing_membar();
 1657     MemBarNode* trailing_mb = trailing->as_MemBar();
 1658     assert(trailing_mb->trailing(), "Not a trailing membar?");
 1659     assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
 1660 
 1661     Node* mem = trailing_mb->in(MemBarNode::Precedent);
 1662     if (mem->is_Store()) {
 1663       assert(mem->as_Store()->is_release(), "");
 1664       assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
 1665       return true;
 1666     } else {
 1667       assert(mem->is_LoadStore(), "");
 1668       assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
 1669       return is_CAS(mem->Opcode(), true);
 1670     }
 1671   }
 1672   return false;
 1673 }
 1674 
// Decide whether the dmb for a MemBarVolatile can be elided: it can
// when the membar trails a volatile store, since the stlr planted for
// that store provides the required ordering.
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  MemBarNode *mbvol = n->as_MemBar();

  // True when membar processing paired this membar with a releasing store.
  bool release = mbvol->trailing_store();
  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
  if (release) {
    // Cross-check the leading/trailing pairing recorded during membar
    // processing.
    Node* leading = mbvol->leading_membar();
    assert(leading->Opcode() == Op_MemBarRelease, "");
    assert(leading->as_MemBar()->leading_store(), "");
    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
  }
#endif

  return release;
}
 1693 
 1694 // predicates controlling emit of str<x>/stlr<x>
 1695 
 1696 bool needs_releasing_store(const Node *n)
 1697 {
 1698   // assert n->is_Store();
 1699   StoreNode *st = n->as_Store();
 1700   return st->trailing_membar() != NULL;
 1701 }
 1702 
 1703 // predicate controlling translation of CAS
 1704 //
 1705 // returns true if CAS needs to use an acquiring load otherwise false
 1706 
 1707 bool needs_acquiring_load_exclusive(const Node *n)
 1708 {
 1709   assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
 1710   LoadStoreNode* ldst = n->as_LoadStore();
 1711   if (is_CAS(n->Opcode(), false)) {
 1712     assert(ldst->trailing_membar() != NULL, "expected trailing membar");
 1713   } else {
 1714     return ldst->trailing_membar() != NULL;
 1715   }
 1716 
 1717   // so we can just return true here
 1718   return true;
 1719 }
 1720 
 1721 #define __ _masm.
 1722 
 1723 // advance declarations for helper functions to convert register
 1724 // indices to register objects
 1725 
 1726 // the ad file has to provide implementations of certain methods
 1727 // expected by the generic code
 1728 //
 1729 // REQUIRED FUNCTIONALITY
 1730 
 1731 //=============================================================================
 1732 
 1733 // !!!!! Special hack to get all types of calls to specify the byte offset
 1734 //       from the start of the call to the point where the return address
 1735 //       will point.
 1736 
 1737 int MachCallStaticJavaNode::ret_addr_offset()
 1738 {
 1739   // call should be a simple bl
 1740   int off = 4;
 1741   return off;
 1742 }
 1743 
int MachCallDynamicJavaNode::ret_addr_offset()
{
  // The inline-cache call sequence is four 4-byte instructions.
  return 16; // movz, movk, movk, bl
}
 1748 
 1749 int MachCallRuntimeNode::ret_addr_offset() {
 1750   // for generated stubs the call will be
 1751   //   far_call(addr)
 1752   // for real runtime callouts it will be six instructions
 1753   // see aarch64_enc_java_to_runtime
 1754   //   adr(rscratch2, retaddr)
 1755   //   lea(rscratch1, RuntimeAddress(addr)
 1756   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
 1757   //   blr(rscratch1)
 1758   CodeBlob *cb = CodeCache::find_blob(_entry_point);
 1759   if (cb) {
 1760     return MacroAssembler::far_branch_size();
 1761   } else {
 1762     return 6 * NativeInstruction::instruction_size;
 1763   }
 1764 }
 1765 
// Indicate if the safepoint node needs the polling page as an input

// the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
// instruction itself. so we cannot plant a mov of the safepoint poll
// address followed by a load. setting this to true means the mov is
// scheduled as a prior instruction. that's better for scheduling
// anyway.

bool SafePointNode::needs_polling_address_input()
{
  return true;
}
 1779 
 1780 //=============================================================================
 1781 
#ifndef PRODUCT
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif

// Emit a brk instruction, trapping to the debugger.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  C2_MacroAssembler _masm(&cbuf);
  __ brk(0);
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 1796 
 1797 //=============================================================================
 1798 
#ifndef PRODUCT
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif

  // Emit _count nop instructions, used as padding for loops and calls.
  void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
    C2_MacroAssembler _masm(&cbuf);
    for (int i = 0; i < _count; i++) {
      __ nop();
    }
  }

  // Each nop is one 4-byte instruction.
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
 1815 
 1816 //=============================================================================
 1817 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
 1818 
 1819 int ConstantTable::calculate_table_base_offset() const {
 1820   return 0;  // absolute addressing, no offset
 1821 }
 1822 
 1823 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
 1824 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
 1825   ShouldNotReachHere();
 1826 }
 1827 
 1828 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
 1829   // Empty encoding
 1830 }
 1831 
 1832 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
 1833   return 0;
 1834 }
 1835 
 1836 #ifndef PRODUCT
 1837 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
 1838   st->print("-- \t// MachConstantBaseNode (empty encoding)");
 1839 }
 1840 #endif
 1841 
#ifndef PRODUCT
// Pretty-print the prolog; must be kept in sync with the code
// generated by MachPrologNode::emit below.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  if (C->output()->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // Small frames are built with an immediate sub; larger ones first
  // materialize the size in rscratch1.
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
  // nmethod entry barrier check (not emitted for stubs).
  if (C->stub_function() == NULL && BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) {
    st->print("\n\t");
    st->print("ldr  rscratch1, [guard]\n\t");
    st->print("dmb ishld\n\t");
    st->print("ldr  rscratch2, [rthread, #thread_disarmed_offset]\n\t");
    st->print("cmp  rscratch1, rscratch2\n\t");
    st->print("b.eq skip");
    st->print("\n\t");
    st->print("blr #nmethod_entry_barrier_stub\n\t");
    st->print("b skip\n\t");
    st->print("guard: int\n\t");
    st->print("\n\t");
    st->print("skip:\n\t");
  }
}
#endif
 1877 
// Emit the method prolog: patchable nop, optional clinit barrier,
// optional SVE predicate reinit, stack bang, frame construction and
// the nmethod entry barrier.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const int framesize = C->output()->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  if (C->clinit_barrier_on_entry()) {
    assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");

    Label L_skip_barrier;

    // Jump to the wrong-method stub while the holder class is still
    // being initialized by another thread.
    __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
    __ bind(L_skip_barrier);
  }

  if (UseSVE > 0 && C->max_vector_size() >= 16) {
    __ reinitialize_ptrue();
  }

  int bangsize = C->output()->bang_size_in_bytes();
  if (C->output()->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (C->stub_function() == NULL) {
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->nmethod_entry_barrier(&_masm);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->output()->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    ConstantTable& constant_table = C->output()->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
 1929 
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

// The prolog contains no relocatable constants.
int MachPrologNode::reloc() const
{
  return 0;
}
 1940 
 1941 //=============================================================================
 1942 
#ifndef PRODUCT
// Pretty-print the epilog; mirrors MachEpilogNode::emit below.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  // Frame teardown mirrors the three construction shapes used in the
  // prolog: empty, small-immediate and large frames.
  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("ldr rscratch1, [rthread],#polling_page_offset\n\t");
    st->print("ldr zr, [rscratch1]");
  }
}
#endif
 1968 
// Emit the method epilog: tear down the frame, run the reserved-stack
// check when enabled, and touch the polling page on method return.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    __ fetch_and_read_polling_page(rscratch1, relocInfo::poll_return_type);
  }
}
 1984 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

// Use the generic pipeline description for the epilog.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
 1998 
 1999 //=============================================================================
 2000 
 2001 // Figure out which register class each belongs in: rc_int, rc_float or
 2002 // rc_stack.
 2003 enum RC { rc_bad, rc_int, rc_float, rc_predicate, rc_stack };
 2004 
 2005 static enum RC rc_class(OptoReg::Name reg) {
 2006 
 2007   if (reg == OptoReg::Bad) {
 2008     return rc_bad;
 2009   }
 2010 
 2011   // we have 32 int registers * 2 halves
 2012   int slots_of_int_registers = RegisterImpl::max_slots_per_register * RegisterImpl::number_of_registers;
 2013 
 2014   if (reg < slots_of_int_registers) {
 2015     return rc_int;
 2016   }
 2017 
 2018   // we have 32 float register * 8 halves
 2019   int slots_of_float_registers = FloatRegisterImpl::max_slots_per_register * FloatRegisterImpl::number_of_registers;
 2020   if (reg < slots_of_int_registers + slots_of_float_registers) {
 2021     return rc_float;
 2022   }
 2023 
 2024   int slots_of_predicate_registers = PRegisterImpl::max_slots_per_register * PRegisterImpl::number_of_registers;
 2025   if (reg < slots_of_int_registers + slots_of_float_registers + slots_of_predicate_registers) {
 2026     return rc_predicate;
 2027   }
 2028 
 2029   // Between predicate regs & stack is the flags.
 2030   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
 2031 
 2032   return rc_stack;
 2033 }
 2034 
 2035 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
 2036   Compile* C = ra_->C;
 2037 
 2038   // Get registers to move.
 2039   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
 2040   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
 2041   OptoReg::Name dst_hi = ra_->get_reg_second(this);
 2042   OptoReg::Name dst_lo = ra_->get_reg_first(this);
 2043 
 2044   enum RC src_hi_rc = rc_class(src_hi);
 2045   enum RC src_lo_rc = rc_class(src_lo);
 2046   enum RC dst_hi_rc = rc_class(dst_hi);
 2047   enum RC dst_lo_rc = rc_class(dst_lo);
 2048 
 2049   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
 2050 
 2051   if (src_hi != OptoReg::Bad) {
 2052     assert((src_lo&1)==0 && src_lo+1==src_hi &&
 2053            (dst_lo&1)==0 && dst_lo+1==dst_hi,
 2054            "expected aligned-adjacent pairs");
 2055   }
 2056 
 2057   if (src_lo == dst_lo && src_hi == dst_hi) {
 2058     return 0;            // Self copy, no move.
 2059   }
 2060 
 2061   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
 2062               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
 2063   int src_offset = ra_->reg2offset(src_lo);
 2064   int dst_offset = ra_->reg2offset(dst_lo);
 2065 
 2066   if (bottom_type()->isa_vect() != NULL) {
 2067     uint ireg = ideal_reg();
 2068     if (ireg == Op_VecA && cbuf) {
 2069       C2_MacroAssembler _masm(cbuf);
 2070       int sve_vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
 2071       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
 2072         // stack->stack
 2073         __ spill_copy_sve_vector_stack_to_stack(src_offset, dst_offset,
 2074                                                 sve_vector_reg_size_in_bytes);
 2075       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
 2076         __ spill_sve_vector(as_FloatRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo),
 2077                             sve_vector_reg_size_in_bytes);
 2078       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
 2079         __ unspill_sve_vector(as_FloatRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo),
 2080                               sve_vector_reg_size_in_bytes);
 2081       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
 2082         __ sve_orr(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2083                    as_FloatRegister(Matcher::_regEncode[src_lo]),
 2084                    as_FloatRegister(Matcher::_regEncode[src_lo]));
 2085       } else {
 2086         ShouldNotReachHere();
 2087       }
 2088     } else if (cbuf) {
 2089       assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
 2090       C2_MacroAssembler _masm(cbuf);
 2091       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
 2092       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
 2093         // stack->stack
 2094         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
 2095         if (ireg == Op_VecD) {
 2096           __ unspill(rscratch1, true, src_offset);
 2097           __ spill(rscratch1, true, dst_offset);
 2098         } else {
 2099           __ spill_copy128(src_offset, dst_offset);
 2100         }
 2101       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
 2102         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2103                ireg == Op_VecD ? __ T8B : __ T16B,
 2104                as_FloatRegister(Matcher::_regEncode[src_lo]));
 2105       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
 2106         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
 2107                  ireg == Op_VecD ? __ D : __ Q,
 2108                  ra_->reg2offset(dst_lo));
 2109       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
 2110         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2111                    ireg == Op_VecD ? __ D : __ Q,
 2112                    ra_->reg2offset(src_lo));
 2113       } else {
 2114         ShouldNotReachHere();
 2115       }
 2116     }
 2117   } else if (cbuf) {
 2118     C2_MacroAssembler _masm(cbuf);
 2119     switch (src_lo_rc) {
 2120     case rc_int:
 2121       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
 2122         if (is64) {
 2123             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
 2124                    as_Register(Matcher::_regEncode[src_lo]));
 2125         } else {
 2126             C2_MacroAssembler _masm(cbuf);
 2127             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
 2128                     as_Register(Matcher::_regEncode[src_lo]));
 2129         }
 2130       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
 2131         if (is64) {
 2132             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2133                      as_Register(Matcher::_regEncode[src_lo]));
 2134         } else {
 2135             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2136                      as_Register(Matcher::_regEncode[src_lo]));
 2137         }
 2138       } else {                    // gpr --> stack spill
 2139         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 2140         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
 2141       }
 2142       break;
 2143     case rc_float:
 2144       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
 2145         if (is64) {
 2146             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
 2147                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2148         } else {
 2149             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
 2150                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2151         }
 2152       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
 2153           if (cbuf) {
 2154             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2155                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2156         } else {
 2157             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2158                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2159         }
 2160       } else {                    // fpr --> stack spill
 2161         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 2162         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
 2163                  is64 ? __ D : __ S, dst_offset);
 2164       }
 2165       break;
 2166     case rc_stack:
 2167       if (dst_lo_rc == rc_int) {  // stack --> gpr load
 2168         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
 2169       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
 2170         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2171                    is64 ? __ D : __ S, src_offset);
 2172       } else {                    // stack --> stack copy
 2173         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 2174         __ unspill(rscratch1, is64, src_offset);
 2175         __ spill(rscratch1, is64, dst_offset);
 2176       }
 2177       break;
 2178     default:
 2179       assert(false, "bad rc_class for spill");
 2180       ShouldNotReachHere();
 2181     }
 2182   }
 2183 
 2184   if (st) {
 2185     st->print("spill ");
 2186     if (src_lo_rc == rc_stack) {
 2187       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
 2188     } else {
 2189       st->print("%s -> ", Matcher::regName[src_lo]);
 2190     }
 2191     if (dst_lo_rc == rc_stack) {
 2192       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
 2193     } else {
 2194       st->print("%s", Matcher::regName[dst_lo]);
 2195     }
 2196     if (bottom_type()->isa_vect() != NULL) {
 2197       int vsize = 0;
 2198       switch (ideal_reg()) {
 2199       case Op_VecD:
 2200         vsize = 64;
 2201         break;
 2202       case Op_VecX:
 2203         vsize = 128;
 2204         break;
 2205       case Op_VecA:
 2206         vsize = Matcher::scalable_vector_reg_size(T_BYTE) * 8;
 2207         break;
 2208       default:
 2209         assert(false, "bad register type for spill");
 2210         ShouldNotReachHere();
 2211       }
 2212       st->print("\t# vector spill size = %d", vsize);
 2213     } else {
 2214       st->print("\t# spill size = %d", is64 ? 64 : 32);
 2215     }
 2216   }
 2217 
 2218   return 0;
 2219 
 2220 }
 2221 
#ifndef PRODUCT
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    // Reuse implementation() in print-only mode (NULL CodeBuffer).
    implementation(NULL, ra_, false, st);
}
#endif

// Reuse implementation() in emit-only mode (NULL outputStream).
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}

uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 2238 
 2239 //=============================================================================
 2240 
#ifndef PRODUCT
void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  // NOTE(review): the text prints "rsp" and an unmatched ']' while
  // emit() below adds to sp -- looks like stale x86-style output.
  st->print("add %s, rsp, #%d]\t# box lock",
            Matcher::regName[reg], offset);
}
#endif

// Materialize the stack address of the box-lock slot into the
// allocated register.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  C2_MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  // This add will handle any 24-bit signed offset. 24 bits allows an
  // 8 megabyte stack frame.
  __ add(as_Register(reg), sp, offset);
}

uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());

  // One instruction when the offset fits an add immediate; otherwise
  // the macro add presumably expands to two instructions.
  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    return NativeInstruction::instruction_size;
  } else {
    return 2 * NativeInstruction::instruction_size;
  }
}
 2271 
 2272 //=============================================================================
 2273 
#ifndef PRODUCT
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  st->print_cr("# MachUEPNode");
  if (UseCompressedClassPointers) {
    st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
    if (CompressedKlassPointers::shift() != 0) {
      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
    }
  } else {
   st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
  }
  st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
  st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
#endif

// Unverified entry point: compare the receiver's klass (loaded from
// j_rarg0) against the inline-cache klass and jump to the ic-miss
// stub on mismatch.
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  C2_MacroAssembler _masm(&cbuf);

  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}

uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
 2309 
 2310 // REQUIRED EMIT CODE
 2311 
 2312 //=============================================================================
 2313 
// Emit exception handler code.
// Returns the offset of the handler within the stub section, or 0 on
// code-cache exhaustion.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  C2_MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  // Far jump to the shared exception blob.
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
 2333 
// Emit deopt handler code.
// Returns the offset of the handler within the stub section, or 0 on
// code-cache exhaustion.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  C2_MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // Set lr to the current pc, then jump to the deopt blob's unpack
  // entry.
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
 2354 
 2355 // REQUIRED MATCHER CODE
 2356 
 2357 //=============================================================================
 2358 
 2359 const bool Matcher::match_rule_supported(int opcode) {
 2360   if (!has_match_rule(opcode))
 2361     return false;
 2362 
 2363   bool ret_value = true;
 2364   switch (opcode) {
 2365     case Op_CacheWB:
 2366     case Op_CacheWBPreSync:
 2367     case Op_CacheWBPostSync:
 2368       if (!VM_Version::supports_data_cache_line_flush()) {
 2369         ret_value = false;
 2370       }
 2371       break;
 2372   }
 2373 
 2374   return ret_value; // Per default match rules are supported.
 2375 }
 2376 
 2377 // Identify extra cases that we might want to provide match rules for vector nodes and
 2378 // other intrinsics guarded with vector length (vlen) and element type (bt).
 2379 const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
 2380   if (!match_rule_supported(opcode) || !vector_size_supported(bt, vlen)) {
 2381     return false;
 2382   }
 2383   int bit_size = vlen * type2aelembytes(bt) * 8;
 2384   if (UseSVE == 0 && bit_size > 128) {
 2385     return false;
 2386   }
 2387   if (UseSVE > 0) {
 2388     return op_sve_supported(opcode);
 2389   } else { // NEON
 2390     // Special cases
 2391     switch (opcode) {
 2392     case Op_MulAddVS2VI:
 2393       if (bit_size < 128) {
 2394         return false;
 2395       }
 2396       break;
 2397     case Op_MulVL:
 2398       return false;
 2399     default:
 2400       break;
 2401     }
 2402   }
 2403   return true; // Per default match rules are supported.
 2404 }
 2405 
// Predicated (masked) vector operations are only available with SVE.
const bool Matcher::has_predicated_vectors(void) {
  return UseSVE > 0;
}

// No platform-specific adjustment of the FP register-pressure threshold.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}

// Not used on AArch64; present only to satisfy the Matcher interface.
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
 2419 
 2420 // Is this branch offset short enough that a short branch can be used?
 2421 //
 2422 // NOTE: If the platform does not provide any short branch variants, then
 2423 //       this method should return false for offset 0.
 2424 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
 2425   // The passed offset is relative to address of the branch.
 2426 
 2427   return (-32768 <= offset && offset < 32768);
 2428 }
 2429 
// Is a 64-bit constant store cheap enough to keep as a single StoreL?
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}

// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
 2440 
 2441 // Vector width in bytes.
// Vector width in bytes for element type bt; 0 means vectors are not usable.
const int Matcher::vector_width_in_bytes(BasicType bt) {
  // The MaxVectorSize should have been set by detecting SVE max vector register size.
  // Cap at 256 bytes (max SVE register) or 16 bytes (NEON Q register).
  int size = MIN2((UseSVE > 0) ? 256 : 16, (int)MaxVectorSize);
  // Minimum 2 values in vector
  if (size < 2*type2aelembytes(bt)) size = 0;
  // But never < 4
  if (size < 4) size = 0;
  return size;
}
 2451 
// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}

// Smallest vector length (in elements) the matcher will accept for bt.
const int Matcher::min_vector_size(const BasicType bt) {
  int max_size = max_vector_size(bt);
  if ((UseSVE > 0) && (MaxVectorSize >= 16)) {
    // Currently vector length less than SVE vector register size is not supported.
    return max_size;
  } else {
    //  For the moment limit the vector size to 8 bytes with NEON.
    int size = 8 / type2aelembytes(bt);
    if (size < 2) size = 2;
    return size;
  }
}
 2468 
// Scalable (runtime-length) vectors are an SVE-only feature.
const bool Matcher::supports_scalable_vector() {
  return UseSVE > 0;
}

// Actual max scalable vector register length.
const int Matcher::scalable_vector_reg_size(const BasicType bt) {
  return Matcher::max_vector_size(bt);
}
 2477 
 2478 // Vector ideal reg.
 2479 const uint Matcher::vector_ideal_reg(int len) {
 2480   if (UseSVE > 0 && 16 <= len && len <= 256) {
 2481     return Op_VecA;
 2482   }
 2483   switch(len) {
 2484     case  8: return Op_VecD;
 2485     case 16: return Op_VecX;
 2486   }
 2487   ShouldNotReachHere();
 2488   return 0;
 2489 }
 2490 
// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  return false;
}

// aarch64 supports misaligned vectors store/load.
const bool Matcher::misaligned_vectors_ok() {
  return true;
}

// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Use conditional move (CMOVL)
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}

const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// No masking needed on AArch64.
const bool Matcher::need_masked_shift_count = false;

// No support for generic vector operands.
const bool Matcher::supports_generic_vector_operands  = false;
 2524 
// Unreachable: generic vector operands are disabled on this platform
// (supports_generic_vector_operands is false).
MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg, bool is_temp) {
  ShouldNotReachHere(); // generic vector operands not supported
  return NULL;
}

// Unreachable: see supports_generic_vector_operands above.
bool Matcher::is_generic_reg2reg_move(MachNode* m) {
  ShouldNotReachHere();  // generic vector operands not supported
  return false;
}

// Unreachable: see supports_generic_vector_operands above.
bool Matcher::is_generic_vector(MachOper* opnd)  {
  ShouldNotReachHere();  // generic vector operands not supported
  return false;
}
 2539 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Complex addressing only works when no shift is needed to decode the oop.
  return CompressedOops::shift() == 0;
}

bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}

bool Matcher::const_oop_prefer_decode() {
  // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
  return CompressedOops::base() == NULL;
}

bool Matcher::const_klass_prefer_decode() {
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
  return CompressedKlassPointers::base() == NULL;
}
 2569 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
// => false: copy float constants rather than rematerialize them.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;
 2582 
// Not expected to be called on AArch64.
// NOTE(review): the previous comment here said "No-op on amd64", which
// appears to have been copied from the x86 port and did not match the
// Unimplemented() body below.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
 2587 
// Advertise here if the CPU requires explicit rounding operations to implement strictfp mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?  No: floats are kept as floats on the stack.
bool Matcher::float_in_double() { return false; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
 2600 
// Return whether or not this register is ever used as an argument.
// This function is used on startup to build the trampoline stubs in
// generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers not be
// available to the callee.
// On AArch64 the Java argument registers are R0-R7 (integer) and
// V0-V7 (FP/SIMD); the *_H_num entries are the allocator's names for
// the upper halves of those registers.
bool Matcher::can_be_java_arg(int reg)
{
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}

// Any Java argument register may also be used for spilling.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
 2631 
// No hand-written assembler path for long division by a constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
 2635 
// Register for DIVI projection of divmodI.
// The four divmod projection masks below are never requested on AArch64
// (each guards with ShouldNotReachHere).
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Registers preserved across a MethodHandle invoke: the frame pointer.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
 2662 
 2663 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
 2664   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
 2665     Node* u = addp->fast_out(i);
 2666     if (u->is_Mem()) {
 2667       int opsize = u->as_Mem()->memory_size();
 2668       assert(opsize > 0, "unexpected memory operand size");
 2669       if (u->as_Mem()->memory_size() != (1<<shift)) {
 2670         return false;
 2671       }
 2672     }
 2673   }
 2674   return true;
 2675 }
 2676 
// ConvI2L inputs do not require an explicit type on this platform.
const bool Matcher::convi2l_type_required = false;

// Should the matcher clone input 'm' of node 'n'?
// Clone the constant shift count of a vector shift so it is matched
// together with the shift node itself.
bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
  if (is_vshift_con_pattern(n, m)) { // ShiftV src (ShiftCntV con)
    mstack.push(m, Visit);           // m = ShiftCntV
    return true;
  }
  return false;
}
 2687 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
// Two patterns are cloned here so they can fold into scaled/extended
// addressing modes: an offset of the form (LShiftL (ConvI2L x) con)
// and a bare (ConvI2L x) offset.  Either is only cloned when it has
// no uses outside address expressions.
bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
 2728 
// No platform-specific address reshaping is needed on AArch64.
void Compile::reshape_address(AddPNode* addp) {
}
 2731 
 2732 
// Emit a volatile load/store via INSN.  Volatile accesses take only a plain
// base register address, so any index, scale, or displacement is rejected.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  C2_MacroAssembler _masm(&cbuf);                                       \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
 2741 
 2742 
// Build an Address from a decomposed memory operand.  The memory-operand
// opcode tells us whether an index register produced by an int-to-long
// conversion must be sign extended (sxtw) instead of shifted (lsl).
// index == -1 means "no index register" => base + displacement form.
static Address mem2address(int opcode, Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
    case INDINDEXI2L:
    case INDINDEXI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      return Address(base, disp);
    } else {
      // Indexed modes carry no displacement.
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      return Address(base, as_Register(index), scale);
    }
  }
 2768 
 2769 
// Member-function-pointer types for the loadStore helpers below: integer,
// base-register-only, floating-point, and SIMD/vector access emitters.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_insn2)(Register Rt, Register adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
 2775 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  // Emits a single integer load/store of size_in_memory bytes via 'insn'.
  static void loadStore(C2_MacroAssembler masm, mem_insn insn,
                        Register reg, int opcode,
                        Register base, int index, int scale, int disp,
                        int size_in_memory)
  {
    Address addr = mem2address(opcode, base, index, scale, disp);
    if (addr.getMode() == Address::base_plus_offset) {
      /* If we get an out-of-range offset it is a bug in the compiler,
         so we assert here. */
      assert(Address::offset_ok_for_immed(addr.offset(), exact_log2(size_in_memory)),
             "c2 compiler bug");
      /* Fix up any out-of-range offsets. */
      // legitimize_address may rewrite the address using rscratch1, so the
      // scratch register must not alias the base or data registers.
      assert_different_registers(rscratch1, base);
      assert_different_registers(rscratch1, reg);
      addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
    }
    (masm.*insn)(reg, addr);
  }
 2797 
  // Emits a single floating-point load/store of size_in_memory bytes via
  // 'insn'.  As in mem2address, the memory-operand opcode selects sxtw vs.
  // lsl extension for the index register.
  static void loadStore(C2_MacroAssembler masm, mem_float_insn insn,
                        FloatRegister reg, int opcode,
                        Register base, int index, int size, int disp,
                        int size_in_memory)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      /* If we get an out-of-range offset it is a bug in the compiler,
         so we assert here. */
      assert(Address::offset_ok_for_immed(disp, exact_log2(size_in_memory)), "c2 compiler bug");
      /* Fix up any out-of-range offsets. */
      // legitimize_address may rewrite the address using rscratch1.
      assert_different_registers(rscratch1, base);
      Address addr = Address(base, disp);
      addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
      (masm.*insn)(reg, addr);
    } else {
      // Indexed modes carry no displacement.
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
 2828 
  // Emits a single SIMD/vector load or store via 'insn'.  Only base+disp
  // or base+scaled-index addressing is supported here.
  static void loadStore(C2_MacroAssembler masm, mem_vector_insn insn,
                        FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                        int opcode, Register base, int index, int size, int disp)
  {
    if (index == -1) {
      (masm.*insn)(reg, T, Address(base, disp));
    } else {
      // Indexed modes carry no displacement.
      assert(disp == 0, "unsupported address mode");
      (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
 2840 
 2841 %}
 2842 
 2843 
 2844 
 2845 //----------ENCODING BLOCK-----------------------------------------------------
 2846 // This block specifies the encoding classes used by the compiler to
 2847 // output byte streams.  Encoding classes are parameterized macros
 2848 // used by Machine Instruction Nodes in order to generate the bit
 2849 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  Four interfaces are
// currently supported: REG_INTER, CONST_INTER, MEMORY_INTER, and
// COND_INTER.  REG_INTER causes an operand to generate a function
 2853 // which returns its register number when queried.  CONST_INTER causes
 2854 // an operand to generate a function which returns the value of the
 2855 // constant when queried.  MEMORY_INTER causes an operand to generate
 2856 // four functions which return the Base Register, the Index Register,
 2857 // the Scale Value, and the Offset Value of the operand when queried.
 2858 // COND_INTER causes an operand to generate six functions which return
 2859 // the encoding code (ie - encoding bits for the instruction)
 2860 // associated with each basic boolean condition for a conditional
 2861 // instruction.
 2862 //
 2863 // Instructions specify two basic values for encoding.  Again, a
 2864 // function is available to check if the constant displacement is an
 2865 // oop. They use the ins_encode keyword to specify their encoding
 2866 // classes (which must be a sequence of enc_class names, and their
 2867 // parameters, specified in the encoding block), and they use the
 2868 // opcode keyword to specify, in order, their primary, secondary, and
 2869 // tertiary opcode.  Only the opcode sections which a particular
 2870 // instruction needs for encoding need to be specified.
 2871 encode %{
 2872   // Build emit functions for each basic byte or larger field in the
 2873   // intel encoding scheme (opcode, rm, sib, immediate), and call them
 2874   // from C++ code in the enc_class source block.  Emit functions will
 2875   // live in the main source block for now.  In future, we can
 2876   // generalize this by adding a syntax that specifies the sizes of
 2877   // fields in an order, so that the adlc can build the emit functions
 2878   // automagically
 2879 
  // catch all for unimplemented encodings
  // Emits code that reports "C2 catch all" if this encoding is ever reached.
  enc_class enc_unimplemented %{
    C2_MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
 2885 
 2886   // BEGIN Non-volatile memory access
 2887 
 2888   // This encoding class is generated automatically from ad_encode.m4.
 2889   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2890   enc_class aarch64_enc_ldrsbw(iRegI dst, memory1 mem) %{
 2891     Register dst_reg = as_Register($dst$$reg);
 2892     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
 2893                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2894   %}
 2895 
 2896   // This encoding class is generated automatically from ad_encode.m4.
 2897   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2898   enc_class aarch64_enc_ldrsb(iRegI dst, memory1 mem) %{
 2899     Register dst_reg = as_Register($dst$$reg);
 2900     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
 2901                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2902   %}
 2903 
 2904   // This encoding class is generated automatically from ad_encode.m4.
 2905   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2906   enc_class aarch64_enc_ldrb(iRegI dst, memory1 mem) %{
 2907     Register dst_reg = as_Register($dst$$reg);
 2908     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
 2909                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2910   %}
 2911 
 2912   // This encoding class is generated automatically from ad_encode.m4.
 2913   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2914   enc_class aarch64_enc_ldrb(iRegL dst, memory1 mem) %{
 2915     Register dst_reg = as_Register($dst$$reg);
 2916     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
 2917                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2918   %}
 2919 
 2920   // This encoding class is generated automatically from ad_encode.m4.
 2921   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2922   enc_class aarch64_enc_ldrshw(iRegI dst, memory2 mem) %{
 2923     Register dst_reg = as_Register($dst$$reg);
 2924     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
 2925                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2926   %}
 2927 
 2928   // This encoding class is generated automatically from ad_encode.m4.
 2929   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2930   enc_class aarch64_enc_ldrsh(iRegI dst, memory2 mem) %{
 2931     Register dst_reg = as_Register($dst$$reg);
 2932     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
 2933                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2934   %}
 2935 
 2936   // This encoding class is generated automatically from ad_encode.m4.
 2937   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2938   enc_class aarch64_enc_ldrh(iRegI dst, memory2 mem) %{
 2939     Register dst_reg = as_Register($dst$$reg);
 2940     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
 2941                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2942   %}
 2943 
 2944   // This encoding class is generated automatically from ad_encode.m4.
 2945   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2946   enc_class aarch64_enc_ldrh(iRegL dst, memory2 mem) %{
 2947     Register dst_reg = as_Register($dst$$reg);
 2948     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
 2949                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2950   %}
 2951 
 2952   // This encoding class is generated automatically from ad_encode.m4.
 2953   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2954   enc_class aarch64_enc_ldrw(iRegI dst, memory4 mem) %{
 2955     Register dst_reg = as_Register($dst$$reg);
 2956     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
 2957                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2958   %}
 2959 
 2960   // This encoding class is generated automatically from ad_encode.m4.
 2961   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2962   enc_class aarch64_enc_ldrw(iRegL dst, memory4 mem) %{
 2963     Register dst_reg = as_Register($dst$$reg);
 2964     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
 2965                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2966   %}
 2967 
 2968   // This encoding class is generated automatically from ad_encode.m4.
 2969   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2970   enc_class aarch64_enc_ldrsw(iRegL dst, memory4 mem) %{
 2971     Register dst_reg = as_Register($dst$$reg);
 2972     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
 2973                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2974   %}
 2975 
 2976   // This encoding class is generated automatically from ad_encode.m4.
 2977   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2978   enc_class aarch64_enc_ldr(iRegL dst, memory8 mem) %{
 2979     Register dst_reg = as_Register($dst$$reg);
 2980     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
 2981                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 2982   %}
 2983 
 2984   // This encoding class is generated automatically from ad_encode.m4.
 2985   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2986   enc_class aarch64_enc_ldrs(vRegF dst, memory4 mem) %{
 2987     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
 2988     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
 2989                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2990   %}
 2991 
 2992   // This encoding class is generated automatically from ad_encode.m4.
 2993   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2994   enc_class aarch64_enc_ldrd(vRegD dst, memory8 mem) %{
 2995     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
 2996     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
 2997                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 2998   %}
 2999 
 3000   // This encoding class is generated automatically from ad_encode.m4.
 3001   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3002   enc_class aarch64_enc_strb(iRegI src, memory1 mem) %{
 3003     Register src_reg = as_Register($src$$reg);
 3004     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
 3005                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 3006   %}
 3007 
 3008   // This encoding class is generated automatically from ad_encode.m4.
 3009   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3010   enc_class aarch64_enc_strb0(memory1 mem) %{
 3011     C2_MacroAssembler _masm(&cbuf);
 3012     loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
 3013                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 3014   %}
 3015 
 3016   // This encoding class is generated automatically from ad_encode.m4.
 3017   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3018   enc_class aarch64_enc_strh(iRegI src, memory2 mem) %{
 3019     Register src_reg = as_Register($src$$reg);
 3020     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
 3021                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 3022   %}
 3023 
 3024   // This encoding class is generated automatically from ad_encode.m4.
 3025   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3026   enc_class aarch64_enc_strh0(memory2 mem) %{
 3027     C2_MacroAssembler _masm(&cbuf);
 3028     loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
 3029                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 3030   %}
 3031 
 3032   // This encoding class is generated automatically from ad_encode.m4.
 3033   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3034   enc_class aarch64_enc_strw(iRegI src, memory4 mem) %{
 3035     Register src_reg = as_Register($src$$reg);
 3036     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
 3037                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 3038   %}
 3039 
 3040   // This encoding class is generated automatically from ad_encode.m4.
 3041   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  // Store of 32-bit zero: strw of the zero register (zr).
  enc_class aarch64_enc_strw0(memory4 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  // 64-bit integer store (str).
  enc_class aarch64_enc_str(iRegL src, memory8 mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64,
    // so copy sp into rscratch2 first and store that instead
    if (src_reg == r31_sp) {
      C2_MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  // Store of 64-bit zero: str of the zero register (zr).
  enc_class aarch64_enc_str0(memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  // 32-bit float store (strs).
  enc_class aarch64_enc_strs(vRegF src, memory4 mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  // 64-bit double store (strd).
  enc_class aarch64_enc_strd(vRegD src, memory8 mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  // Store of a narrow-oop constant: materialize the oop in rscratch2,
  // compress it, then store the 32-bit encoding.
  enc_class aarch64_enc_strw_immn(immN src, memory1 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    address con = (address)$src$$constant;
    // need to do this the hard way until we can manage relocs
    // for 32 bit constants
    __ movoop(rscratch2, (jobject)con);
    // a null constant needs no compression
    if (con) __ encode_heap_oop_not_null(rscratch2);
    loadStore(_masm, &MacroAssembler::strw, rscratch2, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  // Store of a narrow-klass constant: materialize in rscratch2,
  // compress, then store the 32-bit encoding.
  enc_class aarch64_enc_strw_immnk(immN src, memory4 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    address con = (address)$src$$constant;
    // need to do this the hard way until we can manage relocs
    // for 32 bit constants
    __ movoop(rscratch2, (jobject)con);
    __ encode_klass_not_null(rscratch2);
    loadStore(_masm, &MacroAssembler::strw, rscratch2, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  // Ordered byte store of zero: a StoreStore barrier precedes the strb.
  enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
      C2_MacroAssembler _masm(&cbuf);
      __ membar(Assembler::StoreStore);
      loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}
 3122 
 3123   // END Non-volatile memory access
 3124 
  // Vector loads and stores.  The MacroAssembler::S/D/Q argument selects
  // the SIMD operand size: S = 32 bits, D = 64 bits, Q = 128 bits.
  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // 64-bit vector load.
  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // 128-bit vector load.
  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // 32-bit vector store.
  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // 64-bit vector store.
  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // 128-bit vector store.
  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
 3161 
  // volatile loads and stores
  //
  // These delegate address formation to the MOV_VOLATILE helper macro
  // (defined earlier in this file, not visible here), which evidently
  // also declares _masm -- the trailing sign/zero-extension and fmov
  // statements below rely on that.  The last macro argument names the
  // acquire/release instruction to emit (ldar*/stlr*).

  // Byte store-release.
  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  // Halfword store-release.
  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  // Word (32-bit) store-release.
  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}


  // Byte load-acquire, sign-extended to 32 bits.
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // Byte load-acquire, sign-extended to 64 bits.
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  // Byte load-acquire, zero-extended (32-bit destination).
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Byte load-acquire, zero-extended (64-bit destination).
  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Halfword load-acquire, sign-extended to 32 bits.
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // Halfword load-acquire, sign-extended to 64 bits.
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // Halfword load-acquire, zero-extended (32-bit destination).
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Halfword load-acquire, zero-extended (64-bit destination).
  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Word (32-bit) load-acquire.
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // Word load-acquire into a long destination.
  // NOTE(review): same enc_class name as the iRegI variant above;
  // presumably adlc disambiguates by operand types -- confirm.
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // Doubleword (64-bit) load-acquire.
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // Float load-acquire: load the bits into rscratch1, then fmov to the
  // FP destination.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // Double load-acquire: load the bits into rscratch1, then fmov to the
  // FP destination.
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // Doubleword store-release.
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64,
    // so copy sp into rscratch2 first and store that instead
    if (src_reg == r31_sp) {
      C2_MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // Float store-release: fmov the bits into rscratch2 (in a nested scope
  // so this _masm does not collide with the one MOV_VOLATILE introduces),
  // then word store-release.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      C2_MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // Double store-release: fmov the bits into rscratch2, then doubleword
  // store-release.
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      C2_MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
 3288 
  // synchronized read/update encodings

  // Load-acquire-exclusive.  ldaxr only takes a plain register address,
  // so any displacement and/or scaled index is first folded into
  // rscratch1 with lea.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // fold disp first, then the scaled index
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}

  // Store-release-exclusive.  Same addressing cases as ldaxr above, but
  // the address temp is rscratch2 because rscratch1 receives the stlxr
  // status word (0 on success); the final cmpw sets flags so EQ means
  // the store succeeded.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    __ cmpw(rscratch1, zr);
  %}
 3349 
  // Compare-and-swap encodings.  All require a bare base address
  // (no index, no displacement) and differ only in operand size
  // (xword/word/halfword/byte) and in the acquire flag.

  // 64-bit CAS, release-only ordering.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit CAS, release-only ordering.
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 16-bit CAS, release-only ordering.
  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 8-bit CAS, release-only ordering.
  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}


  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit CAS with acquire ordering.
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 16-bit CAS with acquire ordering.
  enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 8-bit CAS with acquire ordering.
  enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // auxiliary used for CompareAndSwapX to set result register:
  // res = (flags == EQ) ? 1 : 0, i.e. 1 iff the CAS above succeeded.
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    C2_MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}
 3425 
 3426   // prefetch encodings
 3427 
 3428   enc_class aarch64_enc_prefetchw(memory mem) %{
 3429     C2_MacroAssembler _masm(&cbuf);
 3430     Register base = as_Register($mem$$base);
 3431     int index = $mem$$index;
 3432     int scale = $mem$$scale;
 3433     int disp = $mem$$disp;
 3434     if (index == -1) {
 3435       __ prfm(Address(base, disp), PSTL1KEEP);
 3436     } else {
 3437       Register index_reg = as_Register(index);
 3438       if (disp == 0) {
 3439         __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3440       } else {
 3441         __ lea(rscratch1, Address(base, disp));
 3442 	__ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3443       }
 3444     }
 3445   %}
 3446 
  // mov encodings
 3448 
  // 32-bit immediate move; zero is materialized from zr.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    C2_MacroAssembler _masm(&cbuf);
    uint32_t con = (uint32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // 64-bit immediate move; zero is materialized from zr.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    uint64_t con = (uint64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}

  // Pointer-constant move.  Oops and metadata go through the relocating
  // movoop/mov_metadata paths; other addresses are either emitted as a
  // plain immediate (below page size) or via adrp + add.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    // null and 1 have dedicated encodings (mov_p0 / mov_p1 below)
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          uintptr_t offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}

  // Null-pointer move.
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Pointer constant 1 (used as a marker value).
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (uint64_t)1);
  %}

  // Load the card-table byte-map base into dst.
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    C2_MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}

  // Narrow-oop constant move via set_narrow_oop (relocated).
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // Narrow-oop null move.
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Narrow-klass constant move via set_narrow_klass (relocated).
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
 3544 
  // arithmetic encodings

  // 32-bit add/subtract immediate.  $primary selects the operation
  // (0 = add, 1 = subtract); a negative effective constant is folded
  // into the opposite instruction so the immediate stays positive.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // 64-bit add/subtract immediate; same $primary convention as above.
  // NOTE(review): the constant is narrowed to int32_t -- presumably
  // immLAddSub guarantees it fits; confirm against the operand definition.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}

  // 32-bit signed division (corrected_idivl handles the Java
  // MIN_VALUE / -1 and divide-by-zero corrections).
  enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
    C2_MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 64-bit signed division.
  // NOTE(review): operands are declared iRegI but corrected_idivq is the
  // 64-bit variant -- presumably matched by long rules; confirm.
  enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
    C2_MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 32-bit signed remainder (want_remainder == true).
  enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
    C2_MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}

  // 64-bit signed remainder (want_remainder == true).
  enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
    C2_MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}
 3606 
  // compare instruction encodings

  // 32-bit register-register compare.
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare with an add/sub-encodable immediate: subsw/addsw
  // against zr set the flags without writing a result register.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare with an arbitrary immediate: materialize it in
  // rscratch1 first.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    uint32_t val = (uint32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  // 64-bit register-register compare.
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare with a 12-bit add/sub immediate.  Zero takes the
  // first branch, so val == -val below can only mean Long.MIN_VALUE.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (uint64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare with an arbitrary immediate via rscratch1.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    uint64_t val = (uint64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}

  // Pointer compare (full 64-bit).
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // Narrow-oop compare (32-bit).
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // Pointer null test: compare against zr.
  enc_class aarch64_enc_testp(iRegP src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // Narrow-oop null test: 32-bit compare against zr.
  enc_class aarch64_enc_testn(iRegN src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
 3690 
  // Unconditional branch to a label.
  enc_class aarch64_enc_b(label lbl) %{
    C2_MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // Conditional branch, signed condition codes.
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    C2_MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Conditional branch, unsigned condition codes.
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    C2_MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Slow-path subtype check via check_klass_subtype_slow_path.
  // Falls through on a successful match (where $primary additionally
  // requests a zero result register) and branches to `miss` otherwise.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     C2_MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     // on the fall-through (match) path, $primary requests a zero result
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
 3726 
  // Static/opt-virtual Java call.  Runtime wrappers (no _method) get a
  // plain runtime-call trampoline; real Java targets get a relocated
  // trampoline call plus a static-call stub for the to-interpreter path.
  // Either emission can fail when the code cache is full, in which case
  // the compile is bailed out.
  enc_class aarch64_enc_java_static_call(method meth) %{
    C2_MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    } else if (UseSVE > 0 && Compile::current()->max_vector_size() >= 16) {
      // Only non uncommon_trap calls need to reinitialize ptrue.
      if (uncommon_trap_request() == 0) {
        __ reinitialize_ptrue();
      }
    }
  %}

  // Virtual (inline-cache) Java call; also restores the SVE ptrue
  // predicate after the callee returns.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    C2_MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    } else if (UseSVE > 0 && Compile::current()->max_vector_size() >= 16) {
      __ reinitialize_ptrue();
    }
  %}

  // Post-call epilog; stack-depth verification is not implemented yet.
  enc_class aarch64_enc_call_epilog() %{
    C2_MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
 3778 
  // Call from compiled Java code to a runtime routine.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    C2_MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blr
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc():
      // push the return address (paired with a zero slot) before the blr,
      // and pop the two slots again after the call returns.
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blr(rscratch1);
      __ bind(retaddr);
      __ add(sp, sp, 2 * wordSize);
    }
    // runtime code may have clobbered the SVE ptrue predicate
    if (UseSVE > 0 && Compile::current()->max_vector_size() >= 16) {
      __ reinitialize_ptrue();
    }
  %}

  // Jump to the rethrow-exception stub.
  enc_class aarch64_enc_rethrow() %{
    C2_MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}

  // Method return; debug builds verify the SVE ptrue predicate first.
  enc_class aarch64_enc_ret() %{
    C2_MacroAssembler _masm(&cbuf);
#ifdef ASSERT
    if (UseSVE > 0 && Compile::current()->max_vector_size() >= 16) {
      __ verify_ptrue();
    }
#endif
    __ ret(lr);
  %}

  // Tail call: indirect jump to the target.
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    C2_MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}

  // Tail jump used for exception forwarding.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    C2_MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
 3839 
  // Fast-path monitor enter (C2 FastLock node).
  // On exit, condition flags encode the outcome:
  //   flag == EQ indicates success (lock acquired),
  //   flag == NE indicates failure (caller must take the slow path).
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markWord from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Check for existing monitor
    __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);

    // Set tmp to be (markWord of object | UNLOCK_VALUE).
    __ orr(tmp, disp_hdr, markWord::unlocked_value);

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markWord with an unlocked value (tmp) and if
    // equal exchange the stack address of our box with object markWord.
    // On failure disp_hdr contains the possibly locked markWord.
    __ cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
               /*release*/ true, /*weak*/ false, disp_hdr);
    __ br(Assembler::EQ, cont);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, have now locked it, and continue at label cont.

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markWord of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);   // Sets flags for result
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    __ b(cont);

    // Handle existing monitor.
    __ bind(object_has_monitor);

    // The object's monitor m is unlocked iff m->owner == NULL,
    // otherwise m->owner may contain a thread or a stack address.
    //
    // Try to CAS m->owner from NULL to current thread.
    __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markWord::monitor_value));
    __ cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
               /*release*/ true, /*weak*/ false, noreg); // Sets flags for result
    // Store a non-null value into the box to avoid looking like a re-entrant
    // lock. The fast-path monitor unlock code checks for
    // markWord::monitor_value so use markWord::unused_mark which has the
    // relevant bit set, and also matches ObjectSynchronizer::enter.
    __ mov(tmp, (address)markWord::unused_mark().value());
    __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
 3917 
  // Fast-path monitor exit (C2 FastUnlock node).
  // On exit, condition flags encode the outcome:
  //   flag == EQ indicates success (lock released),
  //   flag == NE indicates failure (caller must take the slow path).
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;

    assert_different_registers(oop, box, tmp, disp_hdr);

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);

    // Handle existing monitor.
    __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
    __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markWord of the
    // object.

    __ cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
               /*release*/ true, /*weak*/ false, tmp);
    __ b(cont);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // Handle existing monitor.
    __ bind(object_has_monitor);
    STATIC_ASSERT(markWord::monitor_value <= INT_MAX);
    __ add(tmp, tmp, -(int)markWord::monitor_value); // monitor
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
    __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
    __ cmp(rscratch1, zr); // Sets flags for result
    __ br(Assembler::NE, cont);

    __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
    __ cmp(rscratch1, zr); // Sets flags for result
    __ cbnz(rscratch1, cont);
    // need a release store here
    __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ stlr(zr, tmp); // set unowned

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
 3978 
 3979 %}
 3980 
 3981 //----------FRAME--------------------------------------------------------------
 3982 // Definition of frame structure and management information.
 3983 //
 3984 //  S T A C K   L A Y O U T    Allocators stack-slot number
 3985 //                             |   (to get allocators register number
 3986 //  G  Owned by    |        |  v    add OptoReg::stack0())
 3987 //  r   CALLER     |        |
 3988 //  o     |        +--------+      pad to even-align allocators stack-slot
 3989 //  w     V        |  pad0  |        numbers; owned by CALLER
 3990 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
 3991 //  h     ^        |   in   |  5
 3992 //        |        |  args  |  4   Holes in incoming args owned by SELF
 3993 //  |     |        |        |  3
 3994 //  |     |        +--------+
 3995 //  V     |        | old out|      Empty on Intel, window on Sparc
 3996 //        |    old |preserve|      Must be even aligned.
 3997 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
 3998 //        |        |   in   |  3   area for Intel ret address
 3999 //     Owned by    |preserve|      Empty on Sparc.
 4000 //       SELF      +--------+
 4001 //        |        |  pad2  |  2   pad to align old SP
 4002 //        |        +--------+  1
 4003 //        |        | locks  |  0
 4004 //        |        +--------+----> OptoReg::stack0(), even aligned
 4005 //        |        |  pad1  | 11   pad to align new SP
 4006 //        |        +--------+
 4007 //        |        |        | 10
 4008 //        |        | spills |  9   spills
 4009 //        V        |        |  8   (pad0 slot for callee)
 4010 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
 4011 //        ^        |  out   |  7
 4012 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
 4013 //     Owned by    +--------+
 4014 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
 4015 //        |    new |preserve|      Must be even-aligned.
 4016 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
 4017 //        |        |        |
 4018 //
 4019 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
 4020 //         known from SELF's arguments and the Java calling convention.
 4021 //         Region 6-7 is determined per call site.
 4022 // Note 2: If the calling convention leaves holes in the incoming argument
 4023 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
 4025 //         incoming area, as the Java calling convention is completely under
 4026 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
 4028 //         varargs C calling conventions.
 4029 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
 4030 //         even aligned with pad0 as needed.
 4031 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
 4032 //           (the latter is true on Intel but is it false on AArch64?)
 4033 //         region 6-11 is even aligned; it may be padded out more so that
 4034 //         the region from SP to FP meets the minimum stack alignment.
 4035 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
 4036 //         alignment.  Region 11, pad1, may be dynamically extended so that
 4037 //         SP meets the minimum alignment.
 4038 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or Method for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between incoming/outgoing, just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low half of the return register for each ideal register type.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High half (OptoReg::Bad for 32-bit-wide values).
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
 4142 
 4143 //----------ATTRIBUTES---------------------------------------------------------
 4144 //----------Operand Attributes-------------------------------------------------
 4145 op_attrib op_cost(1);        // Required cost attribute
 4146 
 4147 //----------Instruction Attributes---------------------------------------------
 4148 ins_attrib ins_cost(INSN_COST); // Required cost attribute
 4149 ins_attrib ins_size(32);        // Required size attribute (in bits)
 4150 ins_attrib ins_short_branch(0); // Required flag: is this instruction
 4151                                 // a non-matching short branch variant
 4152                                 // of some long branch?
 4153 ins_attrib ins_alignment(4);    // Required alignment attribute (must
 4154                                 // be a power of 2) specifies the
 4155                                 // alignment that some part of the
 4156                                 // instruction (not necessarily the
 4157                                 // start) requires.  If > 1, a
 4158                                 // compute_padding() function must be
 4159                                 // provided for the instruction
 4160 
 4161 //----------OPERANDS-----------------------------------------------------------
 4162 // Operand definitions must precede instruction definitions for correct parsing
 4163 // in the ADLC because operands constitute user defined types which are used in
 4164 // instruction definitions.
 4165 
 4166 //----------Simple Operands----------------------------------------------------
 4167 
// Integer operands 32 bit
// 32 bit immediate: any value (no predicate)
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment (constant 1)
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement (constant -1)
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4211 
// Shift values for add/sub extension shift: 0..4 inclusive
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant no larger than 4 (note: no lower bound on the value)
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4232 
// 32 bit integer constant: 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant: 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant: 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant: 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant: 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant: 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant: 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant: 63
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant: 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant: 255 (0xFF)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant: 65535 (0xFFFF)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4342 
// 64 bit integer constant: 255 (0xFF)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer constant: 65535 (0xFFFF)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer constant: 4294967295 (0xFFFFFFFF)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4372 
// 64 bit mask of contiguous low-order one bits (value+1 is a power of
// two) with the top two bits clear.
operand immL_bitmask()
%{
  predicate((n->get_long() != 0)
            && ((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit mask of contiguous low-order one bits (value+1 is a power of
// two) with the top two bits clear.
operand immI_bitmask()
%{
  predicate((n->get_int() != 0)
            && ((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit mask of contiguous low-order one bits that also fits in a
// positive 32-bit int (value < 0x80000000).
operand immL_positive_bitmaskI()
%{
  predicate((n->get_long() != 0)
            && ((julong)n->get_long() < 0x80000000ULL)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4408 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset (long variant)
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4462 
// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 1-byte access (scale 0)
operand immIOffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 2-byte access (scale 1)
operand immIOffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 4-byte access (scale 2)
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an 8-byte access (scale 3)
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 16-byte access (scale 4)
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4523 
// Long offset for scaled or unscaled immediate loads and stores
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 1-byte access (scale 0)
operand immLoffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 2-byte access (scale 1)
operand immLoffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 4-byte access (scale 2)
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for an 8-byte access (scale 3)
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 16-byte access (scale 4)
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4583 
// 8 bit signed value.
operand immI8()
%{
  predicate(n->get_int() <= 127 && n->get_int() >= -128);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit signed value (simm8), or #simm8 LSL 8
// (i.e. any multiple of 256 in [-32768, 32512]).
operand immI8_shift8()
%{
  predicate((n->get_int() <= 127 && n->get_int() >= -128) ||
            (n->get_int() <= 32512 && n->get_int() >= -32768 && (n->get_int() & 0xff) == 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit signed value (simm8), or #simm8 LSL 8 (long variant).
operand immL8_shift8()
%{
  predicate((n->get_long() <= 127 && n->get_long() >= -128) ||
            (n->get_long() <= 32512 && n->get_long() >= -32768 && (n->get_long() & 0xff) == 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4618 
// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((int64_t)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4640 
// Integer operands 64 bit
// 64 bit immediate: any value (no predicate)
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment (constant 1)
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement (constant -1)
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset of last_Java_pc within the thread's frame anchor
// (frame_anchor_offset + last_Java_pc_offset).
operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4697 
// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (uint64_t)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask (0xFFFFFFFF)
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4727 
// Pointer operands
// Pointer Immediate: any value (no predicate)
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            (CardTable::CardValue*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4798 
// Float and Double operands
// Double Immediate: any value (no predicate)
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: value encodable as a packed FP immediate
// (see Assembler::operand_valid_for_float_immediate).
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: any value (no predicate)
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: value encodable as a packed FP immediate
// (see Assembler::operand_valid_for_float_immediate).
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4859 
// Narrow pointer operands
// Narrow Pointer Immediate
// Matches any compressed-oop constant.
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow (compressed) Klass pointer immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4890 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);  // the no-special subset is also acceptable here
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
// (restricted to the no_special_reg32 class, which excludes the
// reserved/special registers)
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4912 
// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);  // the no-special subset is also acceptable here
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4924 
 4925 // Integer 64 bit Register not Special
 4926 operand iRegLNoSp()
 4927 %{
 4928   constraint(ALLOC_IN_RC(no_special_reg));
 4929   match(RegL);
 4930   match(iRegL_R0);
 4931   format %{ %}
 4932   interface(REG_INTER);
 4933 %}
 4934 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  // the more-restricted pointer operands below are also acceptable
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
// (restricted to the no_special_ptr_reg class, which excludes the
// reserved/special registers)
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4967 
// Fixed single-register pointer operands: each one pins a pointer
// value to one specific general register (used where the calling
// convention or a runtime stub requires a particular register).

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5051 
// Fixed single-register long operands: pin a long value to one
// specific general register.

// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5095 
// Pointer 64 bit Register FP only (the frame pointer)
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5106 
// Fixed single-register int operands: pin a 32-bit int value to one
// specific general register.

// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5151 
 5152 
// Narrow (compressed oop) Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R0 only
operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R2 only
operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R3 only
operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
// (NOTE: previous comment said "Integer 64 bit Register" — this is a
// 32-bit narrow-oop register operand, restricted to no_special_reg32)
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5201 
// heap base register -- used for encoding immN0
// (pins the value to the dedicated heapbase register class)

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5212 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Scalable vector register operand (SVE)
operand vecA()
%{
  constraint(ALLOC_IN_RC(vectora_reg));
  match(VecA);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64-bit (D-sized) vector register operand
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128-bit (X/Q-sized) vector register operand
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5265 
// Fixed single-register FP/SIMD operands vRegD_V0 .. vRegD_V31:
// each pins a double value to one specific V register (used by
// runtime stubs and intrinsics that require particular registers).
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V4()
%{
  constraint(ALLOC_IN_RC(v4_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V5()
%{
  constraint(ALLOC_IN_RC(v5_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V6()
%{
  constraint(ALLOC_IN_RC(v6_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V7()
%{
  constraint(ALLOC_IN_RC(v7_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V8()
%{
  constraint(ALLOC_IN_RC(v8_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V9()
%{
  constraint(ALLOC_IN_RC(v9_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V10()
%{
  constraint(ALLOC_IN_RC(v10_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V11()
%{
  constraint(ALLOC_IN_RC(v11_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V12()
%{
  constraint(ALLOC_IN_RC(v12_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V13()
%{
  constraint(ALLOC_IN_RC(v13_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V14()
%{
  constraint(ALLOC_IN_RC(v14_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V15()
%{
  constraint(ALLOC_IN_RC(v15_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V16()
%{
  constraint(ALLOC_IN_RC(v16_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V17()
%{
  constraint(ALLOC_IN_RC(v17_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V18()
%{
  constraint(ALLOC_IN_RC(v18_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V19()
%{
  constraint(ALLOC_IN_RC(v19_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V20()
%{
  constraint(ALLOC_IN_RC(v20_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V21()
%{
  constraint(ALLOC_IN_RC(v21_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V22()
%{
  constraint(ALLOC_IN_RC(v22_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V23()
%{
  constraint(ALLOC_IN_RC(v23_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V24()
%{
  constraint(ALLOC_IN_RC(v24_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V25()
%{
  constraint(ALLOC_IN_RC(v25_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V26()
%{
  constraint(ALLOC_IN_RC(v26_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V27()
%{
  constraint(ALLOC_IN_RC(v27_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V28()
%{
  constraint(ALLOC_IN_RC(v28_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V29()
%{
  constraint(ALLOC_IN_RC(v29_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V30()
%{
  constraint(ALLOC_IN_RC(v30_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V31()
%{
  constraint(ALLOC_IN_RC(v31_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5553 
// SVE governing predicate register operand
operand pRegGov()
%{
  constraint(ALLOC_IN_RC(gov_pr));
  match(RegVMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5562 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// for floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
// (same physical flags; the distinct operand selects unsigned
// condition codes in the matching branch/cmove rules)
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
 5602 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5644 
//----------Memory Operands----------------------------------------------------

// [base] -- simple register-indirect addressing
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // 0xffffffff encodes "no index register"
    scale(0x0);
    disp(0x0);
  %}
%}

// [base + (int index sign-extended to long) << scale]
// only matched when the scaled access size fits all memory uses
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// [base + (long index << scale)]
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [base + (int index sign-extended to long)], unscaled
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [base + long index], unscaled
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
 5718 
// [base + int immediate offset] operands. The numeric suffix gives
// the access size (in bytes) the immediate-offset operand class is
// constrained to, via the corresponding immIOffsetN operand.
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // no index register
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI1(iRegP reg, immIOffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI2(iRegP reg, immIOffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5802 
// [base + long immediate offset] operands, parallel to the indOffI*
// family above; the numeric suffix gives the constrained access size.
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // no index register
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL1(iRegP reg, immLoffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL2(iRegP reg, immLoffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5886 
// Narrow-oop memory operands: the base is a compressed oop that can
// be used directly as an address only when CompressedOops::shift()
// is zero (i.e. decoding is the identity), which every predicate
// below checks.
operand indirectN(iRegN reg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // no index register
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5991 
 5992 
 5993 
// AArch64 opto stubs need to write to the pc slot in the thread anchor
// [thread register + fixed pc-slot offset]
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // no index register
    scale(0x0);
    disp($off);
  %}
%}
 6008 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
// NOTE(review): the "// RSP" comments below are inherited from the x86
// AD file; 0x1e is presumably the stack pointer's register encoding on
// this port -- confirm against the reg_def block at the top of the file.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
 6083 
// Operands for expressing Control Flow
// NOTE: Label is a predefined operand which should not be redefined in
//       the AD file. It is generically handled within the ADLC.

//----------Conditional Branch Operands----------------------------------------
// Comparison Op  - This is the operation of the comparison, and is limited to
//                  the following set of codes:
//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.

// used for signed integral comparisons and fp comparisons

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  // the numeric values are the AArch64 condition-code encodings
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 6120 
// used for unsigned integral comparisons

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  // unsigned condition-code encodings: lo/hs/ls/hi instead of lt/ge/le/gt
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 6139 
 6140 // used for certain integral comparisons which can be
 6141 // converted to cbxx or tbxx instructions
 6142 
// Condition operand restricted (via predicate) to eq/ne tests; rules
// matching it can select compare-and-branch (cbz/cbnz) or
// test-and-branch (tbz/tbnz) forms instead of cmp+b.cond.
operand cmpOpEqNe()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 6162 
 6163 // used for certain integral comparisons which can be
 6164 // converted to cbxx or tbxx instructions
 6165 
// Condition operand restricted (via predicate) to lt/ge tests; used by
// rules that can encode the comparison as a sign-bit test (tbz/tbnz).
operand cmpOpLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 6186 
 6187 // used for certain unsigned integral comparisons which can be
 6188 // converted to cbxx or tbxx instructions
 6189 
// Condition operand for unsigned comparisons restricted (via predicate)
// to eq/ne/lt/ge tests, the subset convertible to cbxx/tbxx forms.
operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 6212 
 6213 // Special operand allowing long args to int ops to be truncated for free
 6214 
// Matches (ConvL2I reg) as a plain register read: the 32-bit consumer
// simply uses the low half of the long register, so no truncation
// instruction is emitted (op_cost 0).
operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}
 6225 
// Vector memory operand classes, one per access size in bytes; the
// numeric suffix selects the matching scaled-offset operands.
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
 6229 
 6230 //----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
 6232 // instruction definitions by not requiring the AD writer to specify
 6233 // separate instructions for every form of operand when the
 6234 // instruction accepts multiple operand types with the same basic
 6235 // encoding and format. The classic case of this is memory operands.
 6236 
 6237 // memory is used to define read/write location for load/store
 6238 // instruction defs. we can turn a memory op into an Address
 6239 
// Scalar memory operand classes for 1-, 2-, 4- and 8-byte accesses.
// The numeric suffix selects offset operands whose immediate range
// suits that access size.
opclass memory1(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI1, indOffL1,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN);

opclass memory2(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI2, indOffL2,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN);

opclass memory4(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI4, indOffL4,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

opclass memory8(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

// All of the memory operands. For the pipeline description.
opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex,
               indOffI1, indOffL1, indOffI2, indOffL2, indOffI4, indOffL4, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);
 6256 
 6257 
 6258 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
 6259 // operations. it allows the src to be either an iRegI or a (ConvL2I
 6260 // iRegL). in the latter case the l2i normally planted for a ConvL2I
 6261 // can be elided because the 32-bit instruction will just employ the
 6262 // lower 32 bits anyway.
 6263 //
 6264 // n.b. this does not elide all L2I conversions. if the truncated
 6265 // value is consumed by more than one operation then the ConvL2I
 6266 // cannot be bundled into the consuming nodes so an l2i gets planted
 6267 // (actually a movw $dst $src) and the downstream instructions consume
 6268 // the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but it's not too costly.
 6270 
 6271 opclass iRegIorL2I(iRegI, iRegL2I);
 6272 
 6273 //----------PIPELINE-----------------------------------------------------------
 6274 // Rules which define the behavior of the target architectures pipeline.
 6275 
 6276 // For specific pipelines, eg A53, define the stages of that pipeline
 6277 //pipe_desc(ISS, EX1, EX2, WR);
// Map the A53-style stage names (issue, two execute stages, writeback)
// onto the first four stages of the generic pipe_desc defined below.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
 6282 
 6283 // Integer ALU reg operation
 6284 pipeline %{
 6285 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
 6298 
 6299 // We don't use an actual pipeline model so don't care about resources
 6300 // or description. we do use pipeline classes to introduce fixed
 6301 // latencies
 6302 
 6303 //----------RESOURCES----------------------------------------------------------
 6304 // Resources are the functional units available to the machine
 6305 
// Functional units: two issue slots (INS01 = either), two ALUs,
// a multiply-accumulate unit, a divider, a branch unit, a load/store
// unit and a NEON/FP unit.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);
 6313 
 6314 //----------PIPELINE DESCRIPTION-----------------------------------------------
 6315 // Pipeline Description specifies the stages in the machine's pipeline
 6316 
 6317 // Define the pipeline as a generic 6 stage pipeline
 6318 pipe_desc(S0, S1, S2, S3, S4, S5);
 6319 
 6320 //----------PIPELINE CLASSES---------------------------------------------------
 6321 // Pipeline Classes describe the stages in which input and output are
 6322 // referenced by the hardware pipeline.
 6323 
// FP binary op, single precision; result written in S5 on the NEON/FP unit.
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP binary op, double precision.
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, single precision.
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, double precision.
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double -> float.
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert float -> double.
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert float -> int; result lands in a general register.
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert float -> long.
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert int -> float.
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert long -> float.
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double -> int.
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double -> long.
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert int -> double.
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert long -> double.
// NOTE(review): src is declared iRegIorL2I here while fp_l2f takes
// iRegL — confirm this asymmetry is intentional.
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP divide, single precision; restricted to issue slot 0 (INS0).
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision; restricted to issue slot 0.
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP conditional select, single precision; reads the flags register.
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision; reads the flags register.
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move immediate, single precision.
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move immediate, double precision.
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant load, single precision.
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// FP constant load, double precision.
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
 6525 
// Vector multiply, 64-bit; dual-issue capable (INS01).
pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply, 128-bit; issue slot 0 only (INS0).
pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 64-bit; dst is also read (accumulator).
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 128-bit; dst is also read (accumulator).
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector integer binary op, 64-bit.
pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

// Vector integer binary op, 128-bit.
pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}

// Vector logical op, 64-bit.
pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector logical op, 128-bit.
pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shift by register, 64-bit.
pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by register, 128-bit.
pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 64-bit.
pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 128-bit.
pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}
 6645 
// Vector FP binary op, 64-bit.
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP binary op, 128-bit; issue slot 0 only.
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 64-bit; issue slot 0 only.
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 128-bit; issue slot 0 only.
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP square root, 128-bit.
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 64-bit.
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 128-bit; issue slot 0 only.
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Duplicate general register into 64-bit vector.
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate general register into 128-bit vector.
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate float register into 64-bit vector.
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate float register into 128-bit vector.
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate double register into 128-bit vector.
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move immediate, 64-bit.
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move immediate, 128-bit; issue slot 0 only.
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector load, 64-bit.
pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector load, 128-bit.
pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector store, 64-bit.
pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector store, 128-bit.
// NOTE(review): src is declared vecD although this is the 128-bit
// class (mem is vmem16) — confirm whether vecX was intended.
pipe_class vstore_reg_mem128(vecD src, vmem16 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
 6809 
 6810 //------- Integer ALU operations --------------------------
 6811 
 6812 // Integer ALU reg-reg operation
 6813 // Operands needed in EX1, result generated in EX2
 6814 // Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);   // shifted operand needed at issue time
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
 6907 
 6908 //------- Compare operation -------------------------------
 6909 
 6910 // Compare reg-reg
 6911 // Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-imm
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
 6934 
 6935 //------- Conditional instructions ------------------------
 6936 
 6937 // Conditional no operands
 6938 // Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// Eg.  CSEL    x0, x1, zr, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
 6972 
 6973 //------- Multiply pipeline operations --------------------
 6974 
 6975 // Multiply reg-reg
 6976 // Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long (64 bit) multiply
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long (64 bit) multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
 7025 
 7026 //------- Divide pipeline operations --------------------
 7027 
 7028 // Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// 64-bit divide
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
 7051 
 7052 //------- Load pipeline operations ------------------------
 7053 
 7054 // Load - prefetch
 7055 // Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
 7085 
 7086 //------- Store pipeline operations -----------------------
 7087 
 7088 // Store - zr, mem
 7089 // Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);   // address register, read at issue
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
 7119 
//------- Branch pipeline operations ----------------------
 7121 
 7122 // Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
 7148 
 7149 //------- Synchronisation operations ----------------------
 7150 
 7151 // Any operation requiring serialization.
 7152 // EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
 7172 
 7173 // Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}
 7207 
// Define the class for the Nop node: nops issue through the empty
// (zero-latency) pipeline class.
define %{
   MachNop = pipe_class_empty;
%}
 7212 
 7213 %}
 7214 //----------INSTRUCTIONS-------------------------------------------------------
 7215 //
 7216 // match      -- States which machine-independent subtree may be replaced
 7217 //               by this instruction.
 7218 // ins_cost   -- The estimated cost of this instruction is used by instruction
 7219 //               selection to identify a minimum cost tree of machine
 7220 //               instructions that matches a tree of machine-independent
 7221 //               instructions.
 7222 // format     -- A string providing the disassembly for this instruction.
 7223 //               The value of an instruction's operand may be inserted
 7224 //               by referring to it with a '$' prefix.
 7225 // opcode     -- Three instruction opcodes may be provided.  These are referred
 7226 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
 7228 //               indicate the type of machine instruction, while secondary
 7229 //               and tertiary are often used for prefix options or addressing
 7230 //               modes.
 7231 // ins_encode -- A list of encode classes with parameters. The encode class
 7232 //               name must have been defined in an 'enc_class' specification
 7233 //               in the encode section of the architecture description.
 7234 
 7235 // ============================================================================
 7236 // Memory (Load/Store) Instructions
 7237 
 7238 // Load Instructions
 7239 
 7240 // Load Byte (8 bit signed)
// Load signed byte, sign-extended to 32 bits.
instruct loadB(iRegINoSp dst, memory1 mem)
%{
  match(Set dst (LoadB mem));
  // Only plain loads; loads needing acquire semantics are matched by
  // separate rules.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7253 
 7254 // Load Byte (8 bit signed) into long
// Load signed byte, sign-extended to 64 bits; folds the ConvI2L.
instruct loadB2L(iRegLNoSp dst, memory1 mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  // Predicate inspects the LoadB underneath the ConvI2L.
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7267 
 7268 // Load Byte (8 bit unsigned)
// Load unsigned byte, zero-extended to 32 bits.
instruct loadUB(iRegINoSp dst, memory1 mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7281 
 7282 // Load Byte (8 bit unsigned) into long
 7283 instruct loadUB2L(iRegLNoSp dst, memory1 mem)
 7284 %{
 7285   match(Set dst (ConvI2L (LoadUB mem)));
 7286   predicate(!needs_acquiring_load(n->in(1)));
 7287 
 7288   ins_cost(4 * INSN_COST);
 7289   format %{ "ldrb  $dst, $mem\t# byte" %}
 7290 
 7291   ins_encode(aarch64_enc_ldrb(dst, mem));
 7292 
 7293   ins_pipe(iload_reg_mem);
 7294 %}
 7295 
// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory2 mem)
%{
  match(Set dst (LoadS mem));
  // Plain (non-acquiring) loads only; see loadS_volatile below.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory2 mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  // n is the ConvI2L node; check the underlying load.
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory2 mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory2 mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  // ldrh zero-extends to 64 bits, covering the ConvI2L for free.
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7351 
// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory4 mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long
instruct loadI2L(iRegLNoSp dst, memory4 mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  // ldrsw sign-extends the 32-bit value into the 64-bit destination.
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
instruct loadUI2L(iRegLNoSp dst, memory4 mem, immL_32bits mask)
%{
  // Matches the (long)(i & 0xFFFFFFFFL) zero-extension idiom.
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  // n is the AndL node; dig down to the load through ConvI2L.
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7393 
 7394 // Load Long (64 bit signed)
 7395 instruct loadL(iRegLNoSp dst, memory8 mem)
 7396 %{
 7397   match(Set dst (LoadL mem));
 7398   predicate(!needs_acquiring_load(n));
 7399 
 7400   ins_cost(4 * INSN_COST);
 7401   format %{ "ldr  $dst, $mem\t# int" %}
 7402 
 7403   ins_encode(aarch64_enc_ldr(dst, mem));
 7404 
 7405   ins_pipe(iload_reg_mem);
 7406 %}
 7407 
// Load Range
instruct loadRange(iRegINoSp dst, memory4 mem)
%{
  // Array length loads; never volatile, so no acquire predicate is needed.
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
instruct loadP(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadP mem));
  // Plain load with no GC barrier data attached; barrier-bearing loads
  // are matched by GC-specific rules.
  predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7448 
// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Float
instruct loadF(vRegF dst, memory4 mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  // FP loads go through the generic memory pipe, not iload_reg_mem.
  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory8 mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
 7504 
 7505 
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant

instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  // A full 64-bit pointer may need up to a 4-instruction mov sequence.
  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
 7561 
 7562 // Load Pointer Constant One
 7563 
 7564 instruct loadConP1(iRegPNoSp dst, immP_1 con)
 7565 %{
 7566   match(Set dst con);
 7567 
 7568   ins_cost(INSN_COST);
 7569   format %{ "mov  $dst, $con\t# NULL ptr" %}
 7570 
 7571   ins_encode(aarch64_enc_mov_p1(dst, con));
 7572 
 7573   ins_pipe(ialu_imm);
 7574 %}
 7575 
// Load Byte Map Base Constant

instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // The card-table byte map base is materialized PC-relatively via adr.
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
 7631 
// Load Packed Float Constant

// "Packed" means the value is encodable as an fmov immediate (8-bit
// imm8 form), so no constant-table load is needed.
instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}

// Load Float Constant

instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}

// Load Packed Double Constant

instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
 7675 
 7676 // Load Double Constant
 7677 
 7678 instruct loadConD(vRegD dst, immD con) %{
 7679   match(Set dst con);
 7680 
 7681   ins_cost(INSN_COST * 5);
 7682   format %{
 7683     "ldrd $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
 7684   %}
 7685 
 7686   ins_encode %{
 7687     __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
 7688   %}
 7689 
 7690   ins_pipe(fp_load_constant_d);
 7691 %}
 7692 
// Store Instructions

// Store CMS card-mark Immediate
instruct storeimmCM0(immI0 zero, memory1 mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST);
  // The StoreStore barrier is elided here; the ordered variant below
  // emits it explicitly.
  format %{ "storestore (elided)\n\t"
            "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory1 mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "storestore\n\t"
            "dmb ishst"
            "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}

// Store Byte
instruct storeB(iRegIorL2I src, memory1 mem)
%{
  match(Set mem (StoreB mem src));
  // Plain stores only; releasing (volatile) stores are handled by
  // storeB_volatile in the volatile section below.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7738 
 7739 
 7740 instruct storeimmB0(immI0 zero, memory1 mem)
 7741 %{
 7742   match(Set mem (StoreB mem zero));
 7743   predicate(!needs_releasing_store(n));
 7744 
 7745   ins_cost(INSN_COST);
 7746   format %{ "strb rscractch2, $mem\t# byte" %}
 7747 
 7748   ins_encode(aarch64_enc_strb0(mem));
 7749 
 7750   ins_pipe(istore_mem);
 7751 %}
 7752 
// Store Char/Short
instruct storeC(iRegIorL2I src, memory2 mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Char/Short zero (immediate 0 stored via the zero register)
instruct storeimmC0(immI0 zero, memory2 mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}

// Store Integer

instruct storeI(iRegIorL2I src, memory4 mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Integer zero (immediate 0 stored via the zero register)
instruct storeimmI0(immI0 zero, memory4 mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
 7807 
 7808 // Store Long (64 bit signed)
 7809 instruct storeL(iRegL src, memory8 mem)
 7810 %{
 7811   match(Set mem (StoreL mem src));
 7812   predicate(!needs_releasing_store(n));
 7813 
 7814   ins_cost(INSN_COST);
 7815   format %{ "str  $src, $mem\t# int" %}
 7816 
 7817   ins_encode(aarch64_enc_str(src, mem));
 7818 
 7819   ins_pipe(istore_reg_mem);
 7820 %}
 7821 
 7822 // Store Long (64 bit signed)
 7823 instruct storeimmL0(immL0 zero, memory8 mem)
 7824 %{
 7825   match(Set mem (StoreL mem zero));
 7826   predicate(!needs_releasing_store(n));
 7827 
 7828   ins_cost(INSN_COST);
 7829   format %{ "str  zr, $mem\t# int" %}
 7830 
 7831   ins_encode(aarch64_enc_str0(mem));
 7832 
 7833   ins_pipe(istore_mem);
 7834 %}
 7835 
// Store Pointer
instruct storeP(iRegP src, memory8 mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Pointer
instruct storeimmP0(immP0 zero, memory8 mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Compressed Pointer
instruct storeN(iRegN src, memory4 mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Compressed Pointer zero
instruct storeImmN0(immN0 zero, memory4 mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}

// Store Float
instruct storeF(vRegF src, memory4 mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  // FP stores use the generic memory pipe.
  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD(vRegD src, memory8 mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 7921 
 7922 // Store Compressed Klass Pointer
 7923 instruct storeNKlass(iRegN src, memory4 mem)
 7924 %{
 7925   predicate(!needs_releasing_store(n));
 7926   match(Set mem (StoreNKlass mem src));
 7927 
 7928   ins_cost(INSN_COST);
 7929   format %{ "strw  $src, $mem\t# compressed klass ptr" %}
 7930 
 7931   ins_encode(aarch64_enc_strw(src, mem));
 7932 
 7933   ins_pipe(istore_reg_mem);
 7934 %}
 7935 
// TODO
// implement storeImmD0 and storeDImmPacked

// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

instruct prefetchalloc( memory8 mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
 7952 
//  ---------------- volatile loads and stores ----------------

// These rules match the loads/stores that the non-volatile rules above
// exclude via their needs_acquiring_load/needs_releasing_store
// predicates.  They use acquire (ldar*) / release (stlr*) forms and a
// plain indirect addressing mode (no offset/index).

// Load Byte (8 bit signed)
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}
 8019 
// Load Char (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
 8044 
 8045 // Load Short/Char (16 bit signed) into long
 8046 instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 8047 %{
 8048   match(Set dst (ConvI2L (LoadS mem)));
 8049 
 8050   ins_cost(VOLATILE_REF_COST);
 8051   format %{ "ldarh  $dst, $mem\t# short" %}
 8052 
 8053   ins_encode(aarch64_enc_ldarsh(dst, mem));
 8054 
 8055   ins_pipe(pipe_serial);
 8056 %}
 8057 
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  // Matches the (long)(i & 0xFFFFFFFFL) zero-extension idiom; ldarw
  // already zero-extends, so a single acquire load suffices.
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
 8083 
 8084 // Load Long (64 bit signed)
 8085 instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 8086 %{
 8087   match(Set dst (LoadL mem));
 8088 
 8089   ins_cost(VOLATILE_REF_COST);
 8090   format %{ "ldar  $dst, $mem\t# int" %}
 8091 
 8092   ins_encode(aarch64_enc_ldar(dst, mem));
 8093 
 8094   ins_pipe(pipe_serial);
 8095 %}
 8096 
// Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));
  // Only loads with no GC barrier data; barrier-bearing loads are
  // matched by GC-specific rules.
  predicate(n->as_Load()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  // Acquire load into a scratch GPR then move to the FP register
  // (there is no FP ldar); see the fldars encoding.
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
 8149 
// Store Byte
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
 8189 
 8190 // Store Long (64 bit signed)
 8191 instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
 8192 %{
 8193   match(Set mem (StoreL mem src));
 8194 
 8195   ins_cost(VOLATILE_REF_COST);
 8196   format %{ "stlr  $src, $mem\t# int" %}
 8197 
 8198   ins_encode(aarch64_enc_stlr(src, mem));
 8199 
 8200   ins_pipe(pipe_class_memory);
 8201 %}
 8202 
// Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  // Release store via a scratch GPR (there is no FP stlr); see the
  // fstlrs encoding.
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 8257 
//  ---------------- end of volatile loads and stores ----------------

// Cache-line writeback (for NVRAM / persistent-memory flush support).
instruct cacheWB(indirect addr)
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWB addr);

  ins_cost(100);
  format %{"cache wb $addr" %}
  ins_encode %{
    // The address must be a plain base register: no index, no offset.
    assert($addr->index_position() < 0, "should be");
    assert($addr$$disp == 0, "should be");
    __ cache_wb(Address($addr$$base$$Register, 0));
  %}
  ins_pipe(pipe_slow); // XXX
%}

instruct cacheWBPreSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPreSync);

  ins_cost(100);
  format %{"cache wb presync" %}
  ins_encode %{
    __ cache_wbsync(true);
  %}
  ins_pipe(pipe_slow); // XXX
%}

instruct cacheWBPostSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPostSync);

  ins_cost(100);
  format %{"cache wb postsync" %}
  ins_encode %{
    __ cache_wbsync(false);
  %}
  ins_pipe(pipe_slow); // XXX
%}
 8300 
 8301 // ============================================================================
 8302 // BSWAP Instructions
 8303 
 8304 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
 8305   match(Set dst (ReverseBytesI src));
 8306 
 8307   ins_cost(INSN_COST);
 8308   format %{ "revw  $dst, $src" %}
 8309 
 8310   ins_encode %{
 8311     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
 8312   %}
 8313 
 8314   ins_pipe(ialu_reg);
 8315 %}
 8316 
 8317 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
 8318   match(Set dst (ReverseBytesL src));
 8319 
 8320   ins_cost(INSN_COST);
 8321   format %{ "rev  $dst, $src" %}
 8322 
 8323   ins_encode %{
 8324     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
 8325   %}
 8326 
 8327   ins_pipe(ialu_reg);
 8328 %}
 8329 
 8330 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
 8331   match(Set dst (ReverseBytesUS src));
 8332 
 8333   ins_cost(INSN_COST);
 8334   format %{ "rev16w  $dst, $src" %}
 8335 
 8336   ins_encode %{
 8337     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
 8338   %}
 8339 
 8340   ins_pipe(ialu_reg);
 8341 %}
 8342 
 8343 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
 8344   match(Set dst (ReverseBytesS src));
 8345 
 8346   ins_cost(INSN_COST);
 8347   format %{ "rev16w  $dst, $src\n\t"
 8348             "sbfmw $dst, $dst, #0, #15" %}
 8349 
 8350   ins_encode %{
 8351     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
 8352     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
 8353   %}
 8354 
 8355   ins_pipe(ialu_reg);
 8356 %}
 8357 
 8358 // ============================================================================
 8359 // Zero Count Instructions
 8360 
 8361 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
 8362   match(Set dst (CountLeadingZerosI src));
 8363 
 8364   ins_cost(INSN_COST);
 8365   format %{ "clzw  $dst, $src" %}
 8366   ins_encode %{
 8367     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
 8368   %}
 8369 
 8370   ins_pipe(ialu_reg);
 8371 %}
 8372 
 8373 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
 8374   match(Set dst (CountLeadingZerosL src));
 8375 
 8376   ins_cost(INSN_COST);
 8377   format %{ "clz   $dst, $src" %}
 8378   ins_encode %{
 8379     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
 8380   %}
 8381 
 8382   ins_pipe(ialu_reg);
 8383 %}
 8384 
 8385 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
 8386   match(Set dst (CountTrailingZerosI src));
 8387 
 8388   ins_cost(INSN_COST * 2);
 8389   format %{ "rbitw  $dst, $src\n\t"
 8390             "clzw   $dst, $dst" %}
 8391   ins_encode %{
 8392     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
 8393     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
 8394   %}
 8395 
 8396   ins_pipe(ialu_reg);
 8397 %}
 8398 
 8399 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
 8400   match(Set dst (CountTrailingZerosL src));
 8401 
 8402   ins_cost(INSN_COST * 2);
 8403   format %{ "rbit   $dst, $src\n\t"
 8404             "clz    $dst, $dst" %}
 8405   ins_encode %{
 8406     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
 8407     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
 8408   %}
 8409 
 8410   ins_pipe(ialu_reg);
 8411 %}
 8412 
 8413 //---------- Population Count Instructions -------------------------------------
 8414 //
 8415 
// Population count of an int via the NEON CNT instruction: move the value
// into a SIMD register, count set bits per byte (cnt 8B), sum the byte
// counts (addv), and move the scalar result back to a GP register.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): this writes back into $src.  A 32-bit self-move only
    // clears the upper 32 bits, so the int value callers see is unchanged,
    // but it is a write to an input register — presumably safe because
    // iRegIorL2I guarantees only the low word is live; confirm.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
 8437 
// Population count of an int loaded from memory.  The 32-bit load goes
// straight into the SIMD register (ldrs), avoiding a GP->SIMD transfer;
// the upper bits of the vector register are zeroed by the load.
instruct popCountI_mem(iRegINoSp dst, memory4 mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // Emit the load through the generic loadStore helper so all memory4
    // addressing modes (base, index, scale, displacement) are handled.
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
 8459 
 8460 // Note: Long.bitCount(long) returns an int.
// Population count of a long; same cnt/addv idiom as popCountI but the
// whole 64-bit value is transferred (T1D lane 0).  Result dst is an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
 8480 
// Population count of a long loaded from memory; the 64-bit load (ldrd)
// goes directly into the SIMD register, then cnt/addv as above.
instruct popCountL_mem(iRegINoSp dst, memory8 mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // Generic loadStore helper handles all memory8 addressing modes.
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
 8502 
 8503 // ============================================================================
 8504 // MemBar Instruction
 8505 
// LoadFence: orders earlier loads before later loads and stores.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarAcquire that the predicate has proven redundant (e.g. the
// preceding volatile load was already emitted as ldar).  Emits no code,
// only an assembly block comment.
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}

// MemBarAcquire fallback when the barrier cannot be elided.
// NOTE(review): the format text reads "dmb ish" but the encoding passes
// LoadLoad|LoadStore, which presumably emits the load-only flavor
// (dmb ishld) — the format text is cosmetic; confirm against membar().
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire\n\t"
            "dmb ish" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}


// MemBarAcquireLock is always elided on AArch64: the lock acquisition
// itself provides the needed ordering.
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// StoreFence: orders earlier loads and stores before later stores.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarRelease proven redundant by the predicate (e.g. the following
// volatile store will be emitted as stlr).  No code emitted.
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}

// MemBarRelease fallback when the barrier cannot be elided.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release\n\t"
            "dmb ish" %}

  ins_encode %{
    __ block_comment("membar_release");
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarStoreStore: orders earlier stores before later stores.
instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarReleaseLock is always elided, mirroring membar_acquire_lock.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// MemBarVolatile proven redundant (the surrounding volatile access
// sequence already provides StoreLoad ordering).  No code emitted.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// MemBarVolatile fallback: a full StoreLoad barrier.  The inflated cost
// (x100) strongly biases the matcher toward the elided form above.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile\n\t"
             "dmb ish"%}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
 8653 
 8654 // ============================================================================
 8655 // Cast/Convert Instructions
 8656 
 8657 instruct castX2P(iRegPNoSp dst, iRegL src) %{
 8658   match(Set dst (CastX2P src));
 8659 
 8660   ins_cost(INSN_COST);
 8661   format %{ "mov $dst, $src\t# long -> ptr" %}
 8662 
 8663   ins_encode %{
 8664     if ($dst$$reg != $src$$reg) {
 8665       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 8666     }
 8667   %}
 8668 
 8669   ins_pipe(ialu_reg);
 8670 %}
 8671 
 8672 instruct castP2X(iRegLNoSp dst, iRegP src) %{
 8673   match(Set dst (CastP2X src));
 8674 
 8675   ins_cost(INSN_COST);
 8676   format %{ "mov $dst, $src\t# ptr -> long" %}
 8677 
 8678   ins_encode %{
 8679     if ($dst$$reg != $src$$reg) {
 8680       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 8681     }
 8682   %}
 8683 
 8684   ins_pipe(ialu_reg);
 8685 %}
 8686 
 8687 // Convert oop into int for vectors alignment masking
// Truncate a pointer to its low 32 bits (ConvL2I of CastP2X); the 32-bit
// movw zero-extends, discarding the high half.
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
 8699 
 8700 // Convert compressed oop into int for vectors alignment masking
 8701 // in case of 32bit oops (heap < 4Gb).
 8702 instruct convN2I(iRegINoSp dst, iRegN src)
 8703 %{
 8704   predicate(CompressedOops::shift() == 0);
 8705   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
 8706 
 8707   ins_cost(INSN_COST);
 8708   format %{ "mov dst, $src\t# compressed ptr -> int" %}
 8709   ins_encode %{
 8710     __ movw($dst$$Register, $src$$Register);
 8711   %}
 8712 
 8713   ins_pipe(ialu_reg);
 8714 %}
 8715 
 8716 
 8717 // Convert oop pointer into compressed form
// Compress a possibly-null oop into narrow-oop form.
// effect(KILL cr): the null-tolerant encode path is declared to clobber
// flags here — presumably encode_heap_oop performs a compare; confirm in
// MacroAssembler before relying on flags across this node.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}
 8731 
// Compress an oop statically known to be non-null; skips the null check.
// NOTE(review): cr is declared as an operand but there is no effect()
// clause — presumably the not-null encode leaves flags untouched; confirm.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
 8742 
// Expand a narrow oop that may be null back to a full pointer.
// NOTE(review): cr is declared but carries no effect() clause — verify
// decode_heap_oop really leaves the flags unmodified.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}
 8756 
// Expand a narrow oop known non-null (or constant) back to a full
// pointer; skips the null check of the general decode.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
 8770 
 8771 // n.b. AArch64 implementations of encode_klass_not_null and
 8772 // decode_klass_not_null do not modify the flags register so, unlike
 8773 // Intel, we don't kill CR as a side effect here
 8774 
 8775 instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
 8776   match(Set dst (EncodePKlass src));
 8777 
 8778   ins_cost(INSN_COST * 3);
 8779   format %{ "encode_klass_not_null $dst,$src" %}
 8780 
 8781   ins_encode %{
 8782     Register src_reg = as_Register($src$$reg);
 8783     Register dst_reg = as_Register($dst$$reg);
 8784     __ encode_klass_not_null(dst_reg, src_reg);
 8785   %}
 8786 
 8787    ins_pipe(ialu_reg);
 8788 %}
 8789 
 8790 instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
 8791   match(Set dst (DecodeNKlass src));
 8792 
 8793   ins_cost(INSN_COST * 3);
 8794   format %{ "decode_klass_not_null $dst,$src" %}
 8795 
 8796   ins_encode %{
 8797     Register src_reg = as_Register($src$$reg);
 8798     Register dst_reg = as_Register($dst$$reg);
 8799     if (dst_reg != src_reg) {
 8800       __ decode_klass_not_null(dst_reg, src_reg);
 8801     } else {
 8802       __ decode_klass_not_null(dst_reg);
 8803     }
 8804   %}
 8805 
 8806    ins_pipe(ialu_reg);
 8807 %}
 8808 
// CheckCastPP is a compile-time type refinement only — the value stays in
// place and no instruction is emitted (size 0).
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
 8818 
// CastPP: compiler-internal pointer type assertion; emits no code.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
 8828 
// CastII: compiler-internal integer range/type assertion; emits no code
// and costs nothing.
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
 8839 
 8840 // ============================================================================
 8841 // Atomic operation instructions
 8842 //
 8843 // Intel and SPARC both implement Ideal Node LoadPLocked and
 8844 // Store{PIL}Conditional instructions using a normal load for the
 8845 // LoadPLocked and a CAS for the Store{PIL}Conditional.
 8846 //
 8847 // The ideal code appears only to use LoadPLocked/StorePLocked as a
 8848 // pair to lock object allocations from Eden space when not using
 8849 // TLABs.
 8850 //
 8851 // There does not appear to be a Load{IL}Locked Ideal Node and the
 8852 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
 8853 // and to use StoreIConditional only for 32-bit and StoreLConditional
 8854 // only for 64-bit.
 8855 //
 8856 // We implement LoadPLocked and StorePLocked instructions using,
 8857 // respectively the AArch64 hw load-exclusive and store-conditional
 8858 // instructions. Whereas we must implement each of
 8859 // Store{IL}Conditional using a CAS which employs a pair of
 8860 // instructions comprising a load-exclusive followed by a
 8861 // store-conditional.
 8862 
 8863 
 8864 // Locked-load (linked load) of the current heap-top
 8865 // used when updating the eden heap top
 8866 // implemented using ldaxr on AArch64
 8867 
// Load-exclusive with acquire semantics (ldaxr).  Pairs with
// storePConditional below to implement the eden heap-top update.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
 8880 
 8881 // Conditional-store of the updated heap-top.
 8882 // Used during allocation of the shared heap.
 8883 // Sets flag (EQ) on success.
 8884 // implemented using stlxr on AArch64.
 8885 
// Store-conditional with release (stlxr) completing the loadPLocked pair.
// The stlxr status is compared to zero so EQ is set on success; oldval is
// implicit in the exclusive monitor and is not re-checked here.
// NOTE(review): the two format strings concatenate without a "\n\t"
// separator, so they print on one line — cosmetic only.
instruct storePConditional(memory8 heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
 8905 
 8906 
 8907 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
 8908 // when attempting to rebias a lock towards the current thread.  We
 8909 // must use the acquire form of cmpxchg in order to guarantee acquire
 8910 // semantics in this case.
// 64-bit conditional store implemented as a full CAS with acquire
// semantics (see the rebias rationale in the comment above).  Sets EQ in
// cr on success; rscratch1 holds the CAS status.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
 8926 
 8927 // storeIConditional also has acquire semantics, for no better reason
 8928 // than matching storeLConditional.  At the time of writing this
 8929 // comment storeIConditional was not used anywhere by AArch64.
// 32-bit counterpart of storeLConditional: a word-sized acquiring CAS
// whose success is reflected in the EQ flag.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
 8945 
 8946 // standard CompareAndSwapX when we are using barriers
 8947 // these have higher priority than the rules selected by a predicate
 8948 
 8949 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
 8950 // can't match them
 8951 
// CompareAndSwap{B,S,I,L,P,N}: plain (barrier-backed) CAS family.  Each
// performs the CAS via the corresponding aarch64_enc_cmpxchg* encoding
// and then materializes the success flag into res with cset (1 on
// success, 0 on failure).  All clobber the flags register.

// Byte CAS.
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Short (halfword) CAS.
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Int (word) CAS.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Long (doubleword) CAS.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Pointer CAS; only matches when no GC barrier data is attached (a GC
// barrier-set-specific rule handles the other case).
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  predicate(n->as_LoadStore()->barrier_data() == 0);
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Narrow-oop (compressed pointer) CAS, word-sized.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 9060 
 9061 // alternative CompareAndSwapX when we are eliding barriers
 9062 
// Acquiring variants of CompareAndSwap{B,S,I,L,P,N}.  Selected (at half
// the cost of the plain forms) when needs_acquiring_load_exclusive(n)
// proves the CAS must itself provide acquire semantics, letting the
// surrounding barriers be elided; the *_acq encodings use the acquiring
// load-exclusive form.

// Byte CAS, acquiring.
instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Short CAS, acquiring.
instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Int CAS, acquiring.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Long CAS, acquiring.
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Pointer CAS, acquiring; only without GC barrier data (see plain form).
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Narrow-oop CAS, acquiring.
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 9176 
 9177 
 9178 // ---------------------------------------------------------------------
 9179 
 9180 
 9181 // BEGIN This section of the file is automatically generated. Do not edit --------------
 9182 
 9183 // Sundry CAS operations.  Note that release is always true,
 9184 // regardless of the memory ordering of the CAS.  This is because we
 9185 // need the volatile case to be sequentially consistent but there is
 9186 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
 9187 // can't check the type of memory ordering here, so we always emit a
 9188 // STLXR.
 9189 
 9190 // This section is generated from aarch64_ad_cas.m4
 9191 
 9192 
 9193 
// CompareAndExchange{B,S,I,L,N,P}: CAS returning the value previously in
// memory (in res) rather than a success flag.  res is TEMP_DEF because the
// cmpxchg helper writes it before the inputs are dead.  Sub-word variants
// sign-extend res to match Java byte/short semantics.
// NOTE(review): the format comments below say "weak", but every encoding
// passes /*weak*/ false — these are the strong forms.  The text looks like
// a leftover from the weak-CAS template in aarch64_ad_cas.m4; cosmetic.

instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Pointer variant; only without GC barrier data, as for CompareAndSwapP.
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 9286 
// Acquiring variants of CompareAndExchange{B,S,I,L,N}: identical to the
// plain forms except acquire=true and the needs_acquiring_load_exclusive
// predicate (chosen at half the cost when the CAS must supply acquire
// semantics itself).
// NOTE(review): as in the plain forms, the "(…, weak)" format text
// mismatches /*weak*/ false — these are strong CAS; cosmetic only.

instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}


instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}


instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 9370 
 9371 instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
 9372   predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
 9373   match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
 9374   ins_cost(VOLATILE_REF_COST);
 9375   effect(TEMP_DEF res, KILL cr);
 9376   format %{
 9377     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
 9378   %}
 9379   ins_encode %{
 9380     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 9381                Assembler::xword, /*acquire*/ true, /*release*/ true,
 9382                /*weak*/ false, $res$$Register);
 9383   %}
 9384   ins_pipe(pipe_slow);
 9385 %}
 9386 
// Weak CAS of a byte: may fail spuriously, so the old value is not
// returned (cmpxchg result register is noreg); instead $res is set to a
// success flag — 1 if the store succeeded (EQ), else 0 — via csetw.
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9403 
// Weak CAS of a short: may fail spuriously; $res <- success flag
// (1 on EQ, else 0) rather than the old value.
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9420 
// Weak CAS of an int: may fail spuriously; $res <- success flag
// (1 on EQ, else 0) rather than the old value.
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9437 
// Weak CAS of a long: may fail spuriously; $res <- success flag
// (1 on EQ, else 0) rather than the old value.
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9454 
// Weak CAS of a narrow oop: may fail spuriously; $res <- success flag
// (1 on EQ, else 0) rather than the old value.
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9471 
// Weak CAS of a pointer: may fail spuriously; $res <- success flag
// (1 on EQ, else 0). Only applies when barrier_data() == 0, i.e. when
// no GC-barrier variant of this rule is required.
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9489 
// Acquire form of weakCompareAndSwapB: cmpxchg runs with acquire=true.
// Selected when needs_acquiring_load_exclusive(n); the lower ins_cost
// makes the matcher prefer this rule over the plain one.
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9507 
// Acquire form of weakCompareAndSwapS (acquire=true); preferred over the
// plain rule via lower ins_cost when needs_acquiring_load_exclusive(n).
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9525 
// Acquire form of weakCompareAndSwapI (acquire=true); preferred over the
// plain rule via lower ins_cost when needs_acquiring_load_exclusive(n).
instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9543 
// Acquire form of weakCompareAndSwapL (acquire=true); preferred over the
// plain rule via lower ins_cost when needs_acquiring_load_exclusive(n).
instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9561 
// Acquire form of weakCompareAndSwapN (acquire=true); preferred over the
// plain rule via lower ins_cost when needs_acquiring_load_exclusive(n).
instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9579 
 9580 instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
 9581   match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
 9582   predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
 9583   ins_cost(VOLATILE_REF_COST);
 9584   effect(KILL cr);
 9585   format %{
 9586     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
 9587     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 9588   %}
 9589   ins_encode %{
 9590     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 9591                Assembler::xword, /*acquire*/ true, /*release*/ true,
 9592                /*weak*/ true, noreg);
 9593     __ csetw($res$$Register, Assembler::EQ);
 9594   %}
 9595   ins_pipe(pipe_slow);
 9596 %}
 9597 
 9598 // END This section of the file is automatically generated. Do not edit --------------
 9599 // ---------------------------------------------------------------------
 9600 
// Atomic exchange (int): swaps $newv into [$mem]; $prev receives the
// previous contents.
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9610 
// Atomic exchange (long): swaps $newv into [$mem]; $prev receives the
// previous contents.
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9620 
// Atomic exchange (narrow oop, word-sized): swaps $newv into [$mem];
// $prev receives the previous contents.
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9630 
// Atomic exchange (pointer): swaps $newv into [$mem]; $prev receives the
// previous contents. Only applies when barrier_data() == 0 (no GC barrier).
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9641 
// Acquire form of get_and_setI: uses atomic_xchgalw (the acquiring
// exchange) when needs_acquiring_load_exclusive(n); lower ins_cost makes
// the matcher prefer it over the plain rule.
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9652 
// Acquire form of get_and_setL (atomic_xchgal); preferred via lower
// ins_cost when needs_acquiring_load_exclusive(n).
instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9663 
// Acquire form of get_and_setN (atomic_xchgalw); preferred via lower
// ins_cost when needs_acquiring_load_exclusive(n).
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9674 
// Acquire form of get_and_setP (atomic_xchgal); only when
// barrier_data() == 0; preferred via lower ins_cost when
// needs_acquiring_load_exclusive(n).
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9685 
 9686 
// Atomic add (long): [$mem] is atomically updated by register $incr;
// $newval receives the node's result.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9696 
// As get_and_addL, but the result is unused (result_not_used()): noreg is
// passed as the destination and the ins_cost is one unit cheaper.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9707 
// Atomic add (long) with an add/sub-immediate increment ($incr$$constant).
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9717 
// Immediate-increment atomic add (long) with the result unused: noreg
// destination, one cost unit cheaper.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9728 
// Atomic add (int, word-sized via atomic_addw): [$mem] is atomically
// updated by register $incr; $newval receives the node's result.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9738 
// As get_and_addI, but the result is unused (result_not_used()): noreg
// destination, one cost unit cheaper.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9749 
// Atomic add (int) with an add/sub-immediate increment ($incr$$constant).
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9759 
// Immediate-increment atomic add (int) with the result unused: noreg
// destination, one cost unit cheaper.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9770 
// Acquire form of get_and_addL (atomic_addal); preferred via lower
// ins_cost when needs_acquiring_load_exclusive(n).
instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9781 
// Acquire form of get_and_addL_no_res: result unused (noreg) and
// atomic_addal used when needs_acquiring_load_exclusive(n).
instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9792 
// Acquire form of get_and_addLi: immediate increment with atomic_addal.
instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9803 
// Acquire form of get_and_addLi_no_res: immediate increment, result
// unused (noreg), atomic_addal.
instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9814 
// Acquire form of get_and_addI (atomic_addalw); preferred via lower
// ins_cost when needs_acquiring_load_exclusive(n).
instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9825 
// Acquire form of get_and_addI_no_res: result unused (noreg),
// atomic_addalw.
instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9836 
// Acquire form of get_and_addIi: immediate increment with atomic_addalw.
instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9847 
// Acquire form of get_and_addIi_no_res: immediate increment, result
// unused (noreg), atomic_addalw.
instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9858 
 9859 // Manifest a CmpL result in an integer register.
 9860 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
 9861 instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
 9862 %{
 9863   match(Set dst (CmpL3 src1 src2));
 9864   effect(KILL flags);
 9865 
 9866   ins_cost(INSN_COST * 6);
 9867   format %{
 9868       "cmp $src1, $src2"
 9869       "csetw $dst, ne"
 9870       "cnegw $dst, lt"
 9871   %}
 9872   // format %{ "CmpL3 $dst, $src1, $src2" %}
 9873   ins_encode %{
 9874     __ cmp($src1$$Register, $src2$$Register);
 9875     __ csetw($dst$$Register, Assembler::NE);
 9876     __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
 9877   %}
 9878 
 9879   ins_pipe(pipe_class_default);
 9880 %}
 9881 
 9882 instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
 9883 %{
 9884   match(Set dst (CmpL3 src1 src2));
 9885   effect(KILL flags);
 9886 
 9887   ins_cost(INSN_COST * 6);
 9888   format %{
 9889       "cmp $src1, $src2"
 9890       "csetw $dst, ne"
 9891       "cnegw $dst, lt"
 9892   %}
 9893   ins_encode %{
 9894     int32_t con = (int32_t)$src2$$constant;
 9895      if (con < 0) {
 9896       __ adds(zr, $src1$$Register, -con);
 9897     } else {
 9898       __ subs(zr, $src1$$Register, con);
 9899     }
 9900     __ csetw($dst$$Register, Assembler::NE);
 9901     __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
 9902   %}
 9903 
 9904   ins_pipe(pipe_class_default);
 9905 %}
 9906 
 9907 // ============================================================================
 9908 // Conditional Move Instructions
 9909 
 9910 // n.b. we have identical rules for both a signed compare op (cmpOp)
 9911 // and an unsigned compare op (cmpOpU). it would be nice if we could
 9912 // define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
 9918 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
 9919 
// Conditional move, int, signed compare:
// cselw dst, src2, src1, cond  =>  $dst = cond ? $src2 : $src1.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9935 
// Conditional move, int, unsigned compare — same encoding as
// cmovI_reg_reg but typed on cmpOpU/rFlagsRegU (see the note above the
// cmov section for why both flavours exist).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9951 
 9952 // special cases where one arg is zero
 9953 
 9954 // n.b. this is selected in preference to the rule above because it
 9955 // avoids loading constant 0 into a source register
 9956 
 9957 // TODO
 9958 // we ought only to be able to cull one of these variants as the ideal
 9959 // transforms ought always to order the zero consistently (to left/right?)
 9960 
// Conditional move, int, signed compare, first arg zero:
// $dst = cond ? $src : 0 — uses zr instead of materializing constant 0.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9976 
// Unsigned-compare variant of cmovI_zero_reg: $dst = cond ? $src : 0.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9992 
// Conditional move, int, signed compare, second arg zero:
// $dst = cond ? 0 : $src — uses zr instead of materializing constant 0.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10008 
// Unsigned-compare variant of cmovI_reg_zero: $dst = cond ? 0 : $src.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10024 
10025 // special case for creating a boolean 0 or 1
10026 
10027 // n.b. this is selected in preference to the rule above because it
10028 // avoids loading constants 0 and 1 into a source register
10029 
// Boolean materialization, signed compare:
// csincw dst, zr, zr, cond  =>  $dst = cond ? 0 : 1
// (csinc selects zr on the true path and zr+1 on the false path).
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
10048 
// Unsigned-compare variant of cmovI_reg_zero_one: $dst = cond ? 0 : 1.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
10067 
// Conditional move, long, signed compare:
// csel dst, src2, src1, cond  =>  $dst = cond ? $src2 : $src1.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10083 
// Unsigned-compare variant of cmovL_reg_reg: $dst = cond ? $src2 : $src1.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10099 
10100 // special cases where one arg is zero
10101 
// Conditional move, long, signed compare, second arg zero:
// $dst = cond ? 0 : $src — uses zr instead of materializing constant 0.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10117 
// Unsigned-compare variant of cmovL_reg_zero: $dst = cond ? 0 : $src.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10133 
10134 instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
10135   match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));
10136 
10137   ins_cost(INSN_COST * 2);
10138   format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}
10139 
10140   ins_encode %{
10141     __ csel(as_Register($dst$$reg),
10142             as_Register($src$$reg),
10143             zr,
10144             (Assembler::Condition)$cmp$$cmpcode);
10145   %}
10146 
10147   ins_pipe(icond_reg);
10148 %}
10149 
10150 instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
10151   match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));
10152 
10153   ins_cost(INSN_COST * 2);
10154   format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}
10155 
10156   ins_encode %{
10157     __ csel(as_Register($dst$$reg),
10158             as_Register($src$$reg),
10159             zr,
10160             (Assembler::Condition)$cmp$$cmpcode);
10161   %}
10162 
10163   ins_pipe(icond_reg);
10164 %}
10165 
// Conditional move pointer, signed compare: dst = cmp ? src2 : src1.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Conditional move pointer, unsigned compare: dst = cmp ? src2 : src1.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// Signed compare, null "true" operand: dst = cmp ? null : src.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned compare, null "true" operand: dst = cmp ? null : src.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Signed compare, null "false" operand: dst = cmp ? src : null.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned compare, null "false" operand: dst = cmp ? src : null.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10263 
// Conditional move compressed pointer (narrow oop), signed compare:
// dst = cmp ? src2 : src1. Uses the 32-bit cselw since narrow oops are 32 bits.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10279 
// Conditional move compressed pointer (narrow oop), unsigned compare:
// dst = cmp ? src2 : src1. Uses the 32-bit cselw since narrow oops are 32 bits.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  // This rule matches the unsigned comparison (cmpOpU/rFlagsRegU); the format
  // previously mislabelled it as "signed" in the disassembly annotation.
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10295 
// special cases where one arg is zero

// Signed compare, narrow-null "true" operand: dst = cmp ? 0 : src.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned compare, narrow-null "true" operand: dst = cmp ? 0 : src.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Signed compare, narrow-null "false" operand: dst = cmp ? src : 0.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned compare, narrow-null "false" operand: dst = cmp ? src : 0.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10361 
// Conditional move float, signed compare: dst = cmp ? src2 : src1
// (fcsel selects its first source operand when the condition holds).
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}

// Conditional move float, unsigned compare: dst = cmp ? src2 : src1.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10397 
// Conditional move double, signed compare: dst = cmp ? src2 : src1.
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Fixed annotation: this is the double-precision rule (CMoveD / fcseld);
  // the format string previously said "cmove float".
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10415 
// Conditional move double, unsigned compare: dst = cmp ? src2 : src1.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Fixed annotation: this is the double-precision rule (CMoveD / fcseld);
  // the format string previously said "cmove float".
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10433 
10434 // ============================================================================
10435 // Arithmetic Instructions
10436 //
10437 
10438 // Integer Addition
10439 
10440 // TODO
10441 // these currently employ operations which do not set CR and hence are
10442 // not flagged as killing CR but we would like to isolate the cases
10443 // where we want to set flags from those where we don't. need to work
10444 // out how to do that.
10445 
// 32-bit integer add, register + register.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit integer add, register + add/sub-encodable immediate.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// 32-bit integer add of a long truncated to int (ConvL2I) + immediate;
// addw reads only the low 32 bits of src1, so no explicit truncation is needed.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10488 
// Pointer Addition

// Pointer + 64-bit offset register.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer + sign-extended int offset; folds the ConvI2L into the add's
// sxtw extension, saving a separate extend instruction.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer + (long offset << scale); folds the shift into a scaled
// register-offset address computed with lea.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Pointer + (sign-extended int offset << scale); folds both the ConvI2L
// and the shift into one sxtw-scaled address.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10549 
// (ConvI2L src) << scale implemented as a single sbfiz: insert the low
// 32 bits of src, sign-extended, at bit position (scale & 63). The field
// width is capped at 32 via MIN2 because only 32 source bits are meaningful.
// NOTE(review): the rFlagsReg cr parameter is not referenced by the match
// rule or the encoding — confirm whether an effect/KILL was intended.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN2(32, (int)((-$scale$$constant) & 63)));
  %}

  ins_pipe(ialu_reg_shift);
%}
10564 
// Pointer Immediate Addition
// n.b. this needs to be more expensive than using an indirect memory
// operand

// Pointer + add/sub-encodable immediate.
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}

// Long Addition

// 64-bit add, register + register.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10598 
// Long Immediate Addition. No constant pool entries required.
// 64-bit add, register + add/sub-encodable immediate.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10613 
// Integer Subtraction

// 32-bit subtract, register - register.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
// 32-bit subtract, register - add/sub-encodable immediate.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// Long Subtraction
// 64-bit subtract, register - register.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10661 
// Long Immediate Subtraction. No constant pool entries required.
// 64-bit subtract, register - add/sub-encodable immediate.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // Fixed format string: it previously read "sub$dst" with no separator
  // between mnemonic and operand, unlike every sibling rule.
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10676 
// Integer Negation (special case for sub)

// 32-bit negate: dst = 0 - src, matched from SubI with a zero left operand.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long Negation

// 64-bit negate: dst = 0 - src.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10708 
// Integer Multiply

// 32-bit multiply.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// Widening signed multiply: long product of two int operands, matched from
// MulL of two ConvI2L inputs and encoded as a single smull.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// Long Multiply

// 64-bit multiply (low 64 bits of the product).
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10757 
// High 64 bits of the signed 128-bit product of two longs (smulh).
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2, \t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10773 
// Combined Integer Multiply & Add/Sub

// Fused 32-bit multiply-add: dst = src3 + src1 * src2 (maddw).
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// Fused 32-bit multiply-subtract: dst = src3 - src1 * src2 (msubw).
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// Combined Integer Multiply & Neg

// Fused 32-bit multiply-negate: dst = -(src1 * src2); matched for a zero-sub
// on either multiplicand (both forms are equivalent).
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));
  match(Set dst (MulI src1 (SubI zero src2)));

  ins_cost(INSN_COST * 3);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10825 
// Combined Long Multiply & Add/Sub

// Fused 64-bit multiply-add: dst = src3 + src1 * src2 (madd).
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// Fused 64-bit multiply-subtract: dst = src3 - src1 * src2 (msub).
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// Combined Long Multiply & Neg

// Fused 64-bit multiply-negate: dst = -(src1 * src2); matched for a zero-sub
// on either multiplicand.
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));
  match(Set dst (MulL src1 (SubL zero src2)));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10877 
// Combine Integer Signed Multiply & Add/Sub/Neg Long

// Widening fused multiply-add: dst = src3 + (long)src1 * (long)src2 (smaddl).
instruct smaddL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (AddL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smaddl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smaddl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// Widening fused multiply-subtract: dst = src3 - (long)src1 * (long)src2 (smsubl).
instruct smsubL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (SubL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smsubl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smsubl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// Widening fused multiply-negate: dst = -((long)src1 * (long)src2); matched
// for a zero-sub on either converted multiplicand.
instruct smnegL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero (ConvI2L src1)) (ConvI2L src2)));
  match(Set dst (MulL (ConvI2L src1) (SubL zero (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smnegl  $dst, $src1, $src2" %}

  ins_encode %{
    __ smnegl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10927 
// Combined Multiply-Add Shorts into Integer (dst = src1 * src2 + src3 * src4)

// Two-instruction sequence: the first product goes into rscratch1, then a
// fused multiply-add combines it with the second product.
instruct muladdS2I(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3, iRegIorL2I src4) %{
  match(Set dst (MulAddS2I (Binary src1 src2) (Binary src3 src4)));

  ins_cost(INSN_COST * 5);
  format %{ "mulw  rscratch1, $src1, $src2\n\t"
            "maddw $dst, $src3, $src4, rscratch1" %}

  ins_encode %{
    __ mulw(rscratch1, as_Register($src1$$reg), as_Register($src2$$reg));
    __ maddw(as_Register($dst$$reg), as_Register($src3$$reg), as_Register($src4$$reg), rscratch1); %}

  ins_pipe(imac_reg_reg);
%}
10943 
// Integer Divide

// 32-bit signed divide.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}

// Long Divide

// 64-bit signed divide.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10967 
10968 // Integer Remainder
10969 
// 32-bit signed remainder: sdivw then msubw (dst = src1 - (src1/src2)*src2).
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  // Fixed format string: the second line previously contained a stray
  // unbalanced parenthesis ("msubw($dst, ...").
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10980 
10981 // Long Remainder
10982 
// 64-bit signed remainder: sdiv then msub (dst = src1 - (src1/src2)*src2).
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Fixed format string: the second line previously contained a stray
  // unbalanced parenthesis ("msub($dst, ..."); also use "\n\t" to indent
  // the continuation line the same way as modI.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10993 
// Integer Shifts

// Shift Left Register
// 32-bit shift left by register; lslvw uses only the low 5 bits of src2,
// matching Java's shift-count masking for int.
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
// 32-bit shift left by constant; the count is masked to 0..31 at encode time.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
// 32-bit unsigned (logical) shift right by register.
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
// 32-bit unsigned (logical) shift right by constant, masked to 0..31.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
// 32-bit signed (arithmetic) shift right by register.
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
// 32-bit signed (arithmetic) shift right by constant, masked to 0..31.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11091 
11092 // Combined Int Mask and Right Shift (using UBFM)
11093 // TODO
11094 
// Long Shifts

// Shift Left Register
// 64-bit shift left by register; lslv uses only the low 6 bits of src2,
// matching Java's shift-count masking for long.
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
// 64-bit shift left by constant, masked to 0..63 at encode time.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
// 64-bit unsigned (logical) shift right by register.
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
// 64-bit unsigned (logical) shift right by constant, masked to 0..63.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11160 
11161 // A special-case pattern for card table stores.
// Logical right shift of a pointer reinterpreted as a long.  Matching
// the CastP2X node directly lets the pointer-to-integer cast cost no
// extra instruction; the shift count is masked to 6 bits (0x3f).
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11176 
11177 // Shift Right Arithmetic Register
// Long (64-bit) arithmetic right shift where the shift count is
// supplied in a register (variable-shift form, asrv).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11192 
11193 // Shift Right Arithmetic Immediate
// Long (64-bit) arithmetic right shift by an immediate; the shift count
// is masked to 6 bits (0x3f).
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11208 
11209 // BEGIN This section of the file is automatically generated. Do not edit --------------
11210 
11211 
11212 // This pattern is automatically generated from aarch64_ad.m4
11213 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  // m1 is the constant -1, so src1 ^ -1 == ~src1: emitted as a single
  // eon (EOR-NOT) against the zero register.
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  // 32-bit variant: src1 ^ -1 == ~src1, via eonw with the zero register.
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
11249 
11250 // This pattern is automatically generated from aarch64_ad.m4
11251 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  // m1 is -1, so this is src1 & ~src2 (bit clear): a single bicw.
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  // 64-bit variant: src1 & ~src2 via bic.
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11287 
11288 // This pattern is automatically generated from aarch64_ad.m4
11289 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  // m1 is -1, so this is src1 | ~src2 (OR-NOT): a single ornw.
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  // 64-bit variant: src1 | ~src2 via orn.
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11325 
11326 // This pattern is automatically generated from aarch64_ad.m4
11327 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  // m1 is -1, so this is -1 ^ (src2 ^ src1) == ~(src1 ^ src2): eonw.
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  // 64-bit variant: ~(src1 ^ src2) via eon.
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11363 
11364 // This pattern is automatically generated from aarch64_ad.m4
11365 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  // src4 is -1, so this is src1 & ~(src2 >>> src3): bicw with an
  // LSR-shifted operand; shift count masked to 5 bits.
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  // 64-bit: src1 & ~(src2 >>> src3) via bic, LSR; shift masked to 6 bits.
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  // src1 & ~(src2 >> src3) via bicw with an ASR-shifted operand.
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  // 64-bit: src1 & ~(src2 >> src3) via bic, ASR.
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  // src1 & ~(src2 << src3) via bicw with an LSL-shifted operand.
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  // 64-bit: src1 & ~(src2 << src3) via bic, LSL.
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11483 
11484 // This pattern is automatically generated from aarch64_ad.m4
11485 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  // src4 is -1, so this is ~(src1 ^ (src2 >>> src3)): eonw with an
  // LSR-shifted operand; shift count masked to 5 bits.
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  // 64-bit: ~(src1 ^ (src2 >>> src3)) via eon, LSR; shift masked to 6 bits.
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  // ~(src1 ^ (src2 >> src3)) via eonw with an ASR-shifted operand.
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  // 64-bit: ~(src1 ^ (src2 >> src3)) via eon, ASR.
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  // ~(src1 ^ (src2 << src3)) via eonw with an LSL-shifted operand.
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  // 64-bit: ~(src1 ^ (src2 << src3)) via eon, LSL.
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11603 
11604 // This pattern is automatically generated from aarch64_ad.m4
11605 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  // src4 is -1, so this is src1 | ~(src2 >>> src3): ornw with an
  // LSR-shifted operand; shift count masked to 5 bits.
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  // 64-bit: src1 | ~(src2 >>> src3) via orn, LSR; shift masked to 6 bits.
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  // src1 | ~(src2 >> src3) via ornw with an ASR-shifted operand.
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  // 64-bit: src1 | ~(src2 >> src3) via orn, ASR.
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  // src1 | ~(src2 << src3) via ornw with an LSL-shifted operand.
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  // 64-bit: src1 | ~(src2 << src3) via orn, LSL.
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11723 
11724 // This pattern is automatically generated from aarch64_ad.m4
11725 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  // src1 & (src2 >>> src3): the LSR shift is folded into the andw
  // operand; shift count masked to 5 bits.
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  // 64-bit: src1 & (src2 >>> src3) via andr, LSR; shift masked to 6 bits.
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  // src1 & (src2 >> src3): ASR shift folded into the andw operand.
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  // 64-bit: src1 & (src2 >> src3) via andr, ASR.
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  // src1 & (src2 << src3): LSL shift folded into the andw operand.
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  // 64-bit: src1 & (src2 << src3) via andr, LSL.
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11849 
11850 // This pattern is automatically generated from aarch64_ad.m4
11851 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  // src1 ^ (src2 >>> src3): the LSR shift is folded into the eorw
  // operand; shift count masked to 5 bits.
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  // 64-bit: src1 ^ (src2 >>> src3) via eor, LSR; shift masked to 6 bits.
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  // src1 ^ (src2 >> src3): ASR shift folded into the eorw operand.
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  // 64-bit: src1 ^ (src2 >> src3) via eor, ASR.
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  // src1 ^ (src2 << src3): LSL shift folded into the eorw operand.
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  // 64-bit: src1 ^ (src2 << src3) via eor, LSL.
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11975 
11976 // This pattern is automatically generated from aarch64_ad.m4
11977 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11978 instruct OrI_reg_URShift_reg(iRegINoSp dst,
11979                          iRegIorL2I src1, iRegIorL2I src2,
11980                          immI src3, rFlagsReg cr) %{
11981   match(Set dst (OrI src1 (URShiftI src2 src3)));
11982 
11983   ins_cost(1.9 * INSN_COST);
11984   format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}
11985 
11986   ins_encode %{
11987     __ orrw(as_Register($dst$$reg),
11988               as_Register($src1$$reg),
11989               as_Register($src2$$reg),
11990               Assembler::LSR,
11991               $src3$$constant & 0x1f);
11992   %}
11993 
11994   ins_pipe(ialu_reg_reg_shift);
11995 %}
11996 
11997 // This pattern is automatically generated from aarch64_ad.m4
11998 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// OrL of src1 with (src2 >>> src3): matched to a single ORR with a
// shifted-register second operand. 64-bit op; shift count masked to 0..63.
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // shift amount reduced modulo 64 (register width)
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12017 
12018 // This pattern is automatically generated from aarch64_ad.m4
12019 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// OrI of src1 with (src2 >> src3) (arithmetic shift): matched to a single ORRW
// with an ASR-shifted second operand. 32-bit op; shift count masked to 0..31.
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              // shift amount reduced modulo 32 (register width)
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12038 
12039 // This pattern is automatically generated from aarch64_ad.m4
12040 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// OrL of src1 with (src2 >> src3) (arithmetic shift): matched to a single ORR
// with an ASR-shifted second operand. 64-bit op; shift count masked to 0..63.
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              // shift amount reduced modulo 64 (register width)
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12059 
12060 // This pattern is automatically generated from aarch64_ad.m4
12061 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// OrI of src1 with (src2 << src3): matched to a single ORRW with an
// LSL-shifted second operand. 32-bit op; shift count masked to 0..31.
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              // shift amount reduced modulo 32 (register width)
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12080 
12081 // This pattern is automatically generated from aarch64_ad.m4
12082 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// OrL of src1 with (src2 << src3): matched to a single ORR with an
// LSL-shifted second operand. 64-bit op; shift count masked to 0..63.
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              // shift amount reduced modulo 64 (register width)
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12101 
12102 // This pattern is automatically generated from aarch64_ad.m4
12103 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AddI of src1 with (src2 >>> src3): matched to a single ADDW with an
// LSR-shifted second operand. 32-bit op; shift count masked to 0..31.
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // shift amount reduced modulo 32 (register width)
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12122 
12123 // This pattern is automatically generated from aarch64_ad.m4
12124 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AddL of src1 with (src2 >>> src3): matched to a single ADD with an
// LSR-shifted second operand. 64-bit op; shift count masked to 0..63.
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // shift amount reduced modulo 64 (register width)
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12143 
12144 // This pattern is automatically generated from aarch64_ad.m4
12145 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AddI of src1 with (src2 >> src3) (arithmetic shift): matched to a single ADDW
// with an ASR-shifted second operand. 32-bit op; shift count masked to 0..31.
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              // shift amount reduced modulo 32 (register width)
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12164 
12165 // This pattern is automatically generated from aarch64_ad.m4
12166 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AddL of src1 with (src2 >> src3) (arithmetic shift): matched to a single ADD
// with an ASR-shifted second operand. 64-bit op; shift count masked to 0..63.
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              // shift amount reduced modulo 64 (register width)
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12185 
12186 // This pattern is automatically generated from aarch64_ad.m4
12187 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AddI of src1 with (src2 << src3): matched to a single ADDW with an
// LSL-shifted second operand. 32-bit op; shift count masked to 0..31.
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              // shift amount reduced modulo 32 (register width)
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12206 
12207 // This pattern is automatically generated from aarch64_ad.m4
12208 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AddL of src1 with (src2 << src3): matched to a single ADD with an
// LSL-shifted second operand. 64-bit op; shift count masked to 0..63.
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              // shift amount reduced modulo 64 (register width)
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12227 
12228 // This pattern is automatically generated from aarch64_ad.m4
12229 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// SubI of src1 minus (src2 >>> src3): matched to a single SUBW with an
// LSR-shifted second operand. 32-bit op; shift count masked to 0..31.
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // shift amount reduced modulo 32 (register width)
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12248 
12249 // This pattern is automatically generated from aarch64_ad.m4
12250 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// SubL of src1 minus (src2 >>> src3): matched to a single SUB with an
// LSR-shifted second operand. 64-bit op; shift count masked to 0..63.
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // shift amount reduced modulo 64 (register width)
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12269 
12270 // This pattern is automatically generated from aarch64_ad.m4
12271 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// SubI of src1 minus (src2 >> src3) (arithmetic shift): matched to a single SUBW
// with an ASR-shifted second operand. 32-bit op; shift count masked to 0..31.
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              // shift amount reduced modulo 32 (register width)
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12290 
12291 // This pattern is automatically generated from aarch64_ad.m4
12292 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// SubL of src1 minus (src2 >> src3) (arithmetic shift): matched to a single SUB
// with an ASR-shifted second operand. 64-bit op; shift count masked to 0..63.
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              // shift amount reduced modulo 64 (register width)
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12311 
12312 // This pattern is automatically generated from aarch64_ad.m4
12313 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// SubI of src1 minus (src2 << src3): matched to a single SUBW with an
// LSL-shifted second operand. 32-bit op; shift count masked to 0..31.
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              // shift amount reduced modulo 32 (register width)
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12332 
12333 // This pattern is automatically generated from aarch64_ad.m4
12334 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// SubL of src1 minus (src2 << src3): matched to a single SUB with an
// LSL-shifted second operand. 64-bit op; shift count masked to 0..63.
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              // shift amount reduced modulo 64 (register width)
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12353 
12354  
12355 // This pattern is automatically generated from aarch64_ad.m4
12356 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12357 
12358 // Shift Left followed by Shift Right.
12359 // This idiom is used by the compiler for the i2b bytecode etc.
// (src << lshift) >> rshift (arithmetic) collapsed into one SBFM: a signed
// bitfield move that sign-extends the selected field of src into dst.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    // Shift counts are reduced modulo 64 (the register width).
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    // SBFM immr/imms encoding: imms = 63 - lshift marks the top bit of the
    // field kept by the left shift; immr = (rshift - lshift) mod 64 rotates
    // it into the position a subsequent arithmetic right shift would give.
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12377 
12378 // This pattern is automatically generated from aarch64_ad.m4
12379 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12380 
12381 // Shift Left followed by Shift Right.
12382 // This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of sbfmL: (src << lshift) >> rshift (arithmetic) collapsed
// into one SBFMW signed bitfield move.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    // Shift counts are reduced modulo 32 (the register width).
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    // imms = 31 - lshift: top bit of the surviving field;
    // immr = (rshift - lshift) mod 32: rotation placing the field correctly.
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12400 
12401 // This pattern is automatically generated from aarch64_ad.m4
12402 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12403 
12404 // Shift Left followed by Shift Right.
12405 // This idiom is used by the compiler for the i2b bytecode etc.
// (src << lshift) >>> rshift (logical) collapsed into one UBFM: an unsigned
// bitfield move that zero-extends the selected field of src into dst.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    // Shift counts are reduced modulo 64 (the register width).
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    // imms = 63 - lshift: top bit of the surviving field;
    // immr = (rshift - lshift) mod 64: rotation placing the field correctly.
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12423 
12424 // This pattern is automatically generated from aarch64_ad.m4
12425 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12426 
12427 // Shift Left followed by Shift Right.
12428 // This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of ubfmL: (src << lshift) >>> rshift (logical) collapsed
// into one UBFMW unsigned bitfield move.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    // Shift counts are reduced modulo 32 (the register width).
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    // imms = 31 - lshift: top bit of the surviving field;
    // immr = (rshift - lshift) mod 32: rotation placing the field correctly.
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12446 
12447 // Bitfield extract with shift & mask
12448 
12449 // This pattern is automatically generated from aarch64_ad.m4
12450 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src >>> rshift) & mask, where mask is 2^k - 1 (guaranteed by immI_bitmask),
// matched to a single UBFXW bitfield extract of width k starting at rshift.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));
  // Make sure we are not going to exceed what ubfxw can do.
  // (field width + start position must fit inside the 32-bit register)
  predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    // width = number of set bits in mask (mask is 2^width - 1)
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12468 
12469 // This pattern is automatically generated from aarch64_ad.m4
12470 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit variant of ubfxwI: (src >>> rshift) & mask with mask = 2^k - 1,
// matched to a single UBFX bitfield extract of width k starting at rshift.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));
  // Make sure we are not going to exceed what ubfx can do.
  // (field width + start position must fit inside the 64-bit register)
  predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    // width = number of set bits in mask (mask is 2^width - 1)
    int width = exact_log2_long(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12488 
12489 
12490 // This pattern is automatically generated from aarch64_ad.m4
12491 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12492 
12493 // We can use ubfx when extending an And with a mask when we know mask
12494 // is positive.  We know that because immI_bitmask guarantees it.
// ConvI2L of (src >>> rshift) & mask: the 64-bit UBFX zero-extends its result,
// so the int-to-long conversion comes for free in the same instruction.
// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    // width = number of set bits in mask (mask is 2^width - 1)
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12512 
12513 
12514 // This pattern is automatically generated from aarch64_ad.m4
12515 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12516 
12517 // We can use ubfiz when masking by a positive number and then left shifting the result.
12518 // We know that the mask is positive because immI_bitmask guarantees it.
// (src & mask) << lshift with mask = 2^k - 1: matched to a single UBFIZW
// (bitfield insert in zeros) placing a k-bit field at position lshift.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  // field width + insert position must fit inside the 32-bit register
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    // width = number of set bits in mask (mask is 2^width - 1)
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12535 
12536 // This pattern is automatically generated from aarch64_ad.m4
12537 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12538 
12539 // We can use ubfiz when masking by a positive number and then left shifting the result.
12540 // We know that the mask is positive because immL_bitmask guarantees it.
// 64-bit variant of ubfizwI: (src & mask) << lshift with mask = 2^k - 1,
// matched to a single UBFIZ placing a k-bit field at position lshift.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  // field width + insert position must fit inside the 64-bit register
  predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    // width = number of set bits in mask (mask is 2^width - 1)
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12557 
12558 // This pattern is automatically generated from aarch64_ad.m4
12559 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12560 
12561 // We can use ubfiz when masking by a positive number and then left shifting the result.
12562 // We know that the mask is positive because immI_bitmask guarantees it.
// ConvI2L of ((src & mask) << lshift): UBFIZW zero-extends into the full
// register, so the int-to-long conversion is absorbed by the same instruction.
instruct ubfizwIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (LShiftI (AndI src mask) lshift)));
  // stricter bound (<= 31, not 32): the shifted field must not touch the
  // sign bit of the intermediate int value
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    // width = number of set bits in mask (mask is 2^width - 1)
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12579 
12580 // This pattern is automatically generated from aarch64_ad.m4
12581 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12582 
12583 // We can use ubfiz when masking by a positive number and then left shifting the result.
12584 // We know that the mask is positive because immL_bitmask guarantees it.
// ConvL2I of ((src & mask) << lshift): safe to narrow because the predicate
// keeps the inserted field within the low 32 bits (result bound <= 31).
instruct ubfizLConvL2I(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
%{
  match(Set dst (ConvL2I (LShiftL (AndL src mask) lshift)));
  predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    // width = number of set bits in mask (mask is 2^width - 1)
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12601 
12602 
12603 // This pattern is automatically generated from aarch64_ad.m4
12604 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12605 
// If there is a convert I to L block between an AndI and a LShiftL, we can also match ubfiz
// (ConvI2L(src & mask)) << lshift: the I2L in the middle of the AND/shift
// pair is absorbed by the zero-extending 64-bit UBFIZ.
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
  // field width + insert position must fit inside the 64-bit register
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    // width = number of set bits in mask (mask is 2^width - 1)
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12623 
12624 // This pattern is automatically generated from aarch64_ad.m4
12625 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12626 
// If there is a convert L to I block between an AndL and a LShiftI, we can also match ubfiz
// (ConvL2I(src & mask)) << lshift: the L2I between the AND and the int shift
// is absorbed; the predicate keeps the result within 32 bits (bound <= 31).
instruct ubfizLConvL2Ix(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
%{
  match(Set dst (LShiftI (ConvL2I (AndL src mask)) lshift));
  predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 31)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    // width = number of set bits in mask (mask is 2^width - 1)
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12644 
12645 // This pattern is automatically generated from aarch64_ad.m4
12646 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12647 
12648 // Can skip int2long conversions after AND with small bitmask
// ConvI2L(src & msk) with msk = 2^k - 1: a UBFIZ with zero shift both masks
// and zero-extends in one instruction, so the I2L node costs nothing extra.
instruct ubfizIConvI2LAndI(iRegLNoSp dst, iRegI src, immI_bitmask msk)
%{
  match(Set dst (ConvI2L (AndI src msk)));
  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, 0, exact_log2($msk + 1) " %}
  ins_encode %{
    // immI_bitmask guarantees msk is 2^width - 1, so exact_log2 is well-defined
    __ ubfiz(as_Register($dst$$reg), as_Register($src$$reg), 0, exact_log2($msk$$constant + 1));
  %}
  ins_pipe(ialu_reg_shift);
%}
12659 
12660 
// Rotations
12662 // This pattern is automatically generated from aarch64_ad.m4
12663 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src1 << lshift) | (src2 >>> rshift) where lshift + rshift == 64
// (enforced by the predicate, modulo 64): exactly an EXTR double-register
// extract. With src1 == src2 this is a rotate right by rshift.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  // require (lshift + rshift) mod 64 == 0 so the two shifted parts abut exactly
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12678 
12679 
12680 // This pattern is automatically generated from aarch64_ad.m4
12681 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit variant of extrOrL: (src1 << lshift) | (src2 >>> rshift) with
// lshift + rshift == 32 (mod 32, per the predicate) becomes one EXTRW.
// NOTE(review): the format string prints "extr" although the encoding emits
// the 32-bit extrw; disassembly annotation only — matches the m4 template,
// verify against aarch64_ad.m4 before changing.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  // require (lshift + rshift) mod 32 == 0 so the two shifted parts abut exactly
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12696 
12697 
12698 // This pattern is automatically generated from aarch64_ad.m4
12699 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src1 << lshift) + (src2 >>> rshift) with lshift + rshift == 64 (mod 64,
// per the predicate): the two shifted parts occupy disjoint bit ranges, so
// ADD equals OR here and the pair is matched to a single EXTR.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  // require (lshift + rshift) mod 64 == 0 so the two shifted parts abut exactly
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12714 
12715 
12716 // This pattern is automatically generated from aarch64_ad.m4
12717 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit variant of extrAddL: (src1 << lshift) + (src2 >>> rshift) with
// lshift + rshift == 32 (mod 32, per the predicate) becomes one EXTRW.
// NOTE(review): the format string prints "extr" although the encoding emits
// the 32-bit extrw; disassembly annotation only — matches the m4 template,
// verify against aarch64_ad.m4 before changing.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  // require (lshift + rshift) mod 32 == 0 so the two shifted parts abut exactly
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12732 
12733 
12734 // This pattern is automatically generated from aarch64_ad.m4
12735 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12736 
12737 // rol expander
// Expander only (no match rule): used by the rolL_rReg_Var_* patterns below.
// AArch64 has no rotate-left instruction, so rol is synthesized as a rotate
// right by the negated count: rscratch1 = 0 - shift, then RORV.
// Clobbers rscratch1.
instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    // negate the shift count: rotating right by (-n mod 64) == rotating left by n
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12751 
12752 // This pattern is automatically generated from aarch64_ad.m4
12753 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12754 
12755 // rol expander
// Expander only (no match rule): 32-bit counterpart of rolL_rReg, used by the
// rolI_rReg_Var_* patterns below. Synthesizes rotate-left as RORVW with the
// negated count. Clobbers rscratch1.
instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    // negate the shift count: rotating right by (-n mod 32) == rotating left by n
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12769 
12770 // This pattern is automatically generated from aarch64_ad.m4
12771 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate-left idiom: (src << shift) | (src >>> (64 - shift)); expands to the
// rolL_rReg expander above.
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}
12780 
12781 // This pattern is automatically generated from aarch64_ad.m4
12782 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Same rotate-left idiom with the constant folded to 0: since rotation
// amounts are taken mod 64, (0 - shift) and (64 - shift) rotate identically.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}
12791 
12792 // This pattern is automatically generated from aarch64_ad.m4
12793 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit rotate-left idiom: (src << shift) | (src >>> (32 - shift)); expands
// to the rolI_rReg expander above.
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12802 
12803 // This pattern is automatically generated from aarch64_ad.m4
12804 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Same 32-bit rotate-left idiom with the constant folded to 0: rotation
// amounts are taken mod 32, so (0 - shift) and (32 - shift) rotate identically.
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12813 
12814 // This pattern is automatically generated from aarch64_ad.m4
12815 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12816 
12817 // ror expander
// Expander only (no match rule): used by the rorL_rReg_Var_* patterns below.
// Rotate-right maps directly onto RORV — no scratch register needed.
instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12830 
12831 // This pattern is automatically generated from aarch64_ad.m4
12832 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12833 
12834 // ror expander
// Expander only (no match rule): 32-bit counterpart of rorL_rReg, used by the
// rorI_rReg_Var_* patterns below. Maps directly onto RORVW.
instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12847 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate-right by a variable amount, with the complementary shift
// written either as (64 - shift) / (32 - shift) or as (0 - shift);
// both are equivalent under Java's masked shift counts.  All four
// patterns expand to the ror expanders defined above.
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate-right (long), zero-constant form.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate-right (int), explicit-width form.
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate-right (int), zero-constant form.
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12891 
12892 
12893 // Add/subtract (extended)
12894 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long +/- (long)int: folds the i2l sign extension into the add/sub by
// using the extended-register form (sxtw), saving a separate extend.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long - (long)int, see AddExtI above.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12924 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// An int shift pair (x << k) >> k sign-extends (arithmetic >>) or
// zero-extends (logical >>>) the low 32-k bits; these patterns fold
// that extension into the add via the extended-register form.
// (x << 16) >> 16 == (short)x  ->  sxth.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (x << 24) >> 24 == (byte)x  ->  sxtb.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (x << 24) >>> 24 == x & 0xFF (zero-extended byte)  ->  uxtb.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12969 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long variants of the shift-pair extensions above:
// (x << 48) >> 48 sign-extends a halfword  ->  sxth.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (x << 32) >> 32 sign-extends a word  ->  sxtw.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (x << 56) >> 56 sign-extends a byte  ->  sxtb.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (x << 56) >>> 56 zero-extends a byte  ->  uxtb.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
13029 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Masking with 0xFF / 0xFFFF / 0xFFFFFFFF is a zero extension; these
// patterns fold the AND into the add via the extended-register form
// (uxtb / uxth / uxtw respectively).
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// int + (x & 0xFFFF)  ->  addw ... uxth.
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long + (x & 0xFF)  ->  add ... uxtb.
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long + (x & 0xFFFF)  ->  add ... uxth.
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long + (x & 0xFFFFFFFF)  ->  add ... uxtw.
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13104 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterparts of the AND-mask patterns above: the zero
// extension implied by the mask folds into sub's extended-register
// form.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// int - (x & 0xFFFF)  ->  subw ... uxth.
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long - (x & 0xFF)  ->  sub ... uxtb.
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long - (x & 0xFFFF)  ->  sub ... uxth.
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long - (x & 0xFFFFFFFF)  ->  sub ... uxtw.
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13179 
13180 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Sign-extension (via shift pair) followed by a left shift, folded into
// add/sub's extended-register-with-shift form:
//   add Rd, Rn, Rm, sxtb #lshift2.
// immIExt restricts lshift2 to the amounts the extended-register
// encoding accepts (see the immIExt operand definition).
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long + ((long)(short)x << lshift2)  ->  add ... sxth #lshift2.
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long + ((long)(int)x << lshift2)  ->  add ... sxtw #lshift2.
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart: long - ((long)(byte)x << lshift2).
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long - ((long)(short)x << lshift2)  ->  sub ... sxth #lshift2.
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long - ((long)(int)x << lshift2)  ->  sub ... sxtw #lshift2.
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13270 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Int (W-register) versions of the shifted sign-extend patterns above:
// addw/subw with an extended register shifted by #lshift2.
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// int + ((short)x << lshift2)  ->  addw ... sxth #lshift2.
instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// int - ((byte)x << lshift2)  ->  subw ... sxtb #lshift2.
instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// int - ((short)x << lshift2)  ->  subw ... sxth #lshift2.
instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13330 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long +/- ((long)int << lshift): folds the i2l extension and the left
// shift into a single extended-register operation (sxtw #lshift).
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart of AddExtI_shift above.
instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13360 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (x & mask) << lshift folded into add/sub's zero-extended
// register-with-shift form: add Rd, Rn, Rm, uxt{b,h,w} #lshift.
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long + ((x & 0xFFFF) << lshift)  ->  add ... uxth #lshift.
instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long + ((x & 0xFFFFFFFF) << lshift)  ->  add ... uxtw #lshift.
instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long - ((x & 0xFF) << lshift)  ->  sub ... uxtb #lshift.
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long - ((x & 0xFFFF) << lshift)  ->  sub ... uxth #lshift.
instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long - ((x & 0xFFFFFFFF) << lshift)  ->  sub ... uxtw #lshift.
instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13450 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Int (W-register) versions of the masked-and-shifted patterns above:
// addw/subw with a zero-extended register shifted by #lshift.
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// int + ((x & 0xFFFF) << lshift)  ->  addw ... uxth #lshift.
instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// int - ((x & 0xFF) << lshift)  ->  subw ... uxtb #lshift.
instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// int - ((x & 0xFFFF) << lshift)  ->  subw ... uxth #lshift.
instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13510 
13511 
13512 
13513 // END This section of the file is automatically generated. Do not edit --------------
13514 
13515 
13516 // ============================================================================
13517 // Floating Point Arithmetic Instructions
13518 
// Single-precision FP add: dst = src1 + src2 (fadds).
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP add: dst = src1 + src2 (faddd).
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Single-precision FP subtract: dst = src1 - src2 (fsubs).
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP subtract: dst = src1 - src2 (fsubd).
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13578 
// Single-precision FP multiply: dst = src1 * src2 (fmuls).
// Slightly higher cost than add/sub (INSN_COST * 6 vs * 5).
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP multiply: dst = src1 * src2 (fmuld).
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13608 
// src1 * src2 + src3
// Fused multiply-add, float. Only matched when UseFMA is enabled, since
// FmaF requires fused (single-rounding) semantics.
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3
// Fused multiply-add, double. Only matched when UseFMA is enabled.
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13642 
// -src1 * src2 + src3
// Fused multiply-subtract, float. Two match rules cover the negation
// appearing on either multiplicand; both reduce to fmsubs.
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
// Fused multiply-subtract, double; negation may sit on either multiplicand.
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13678 
// -src1 * src2 - src3
// Fused negated multiply-add, float: both the product and the addend are
// negated in the ideal graph; maps to fnmadds.
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
// Fused negated multiply-add, double; maps to fnmaddd.
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13714 
13715 // src1 * src2 - src3
// src1 * src2 - src3
// Fused multiply with negated addend, float; maps to fnmsubs.
// Note: the former trailing `immF0 zero` operand was dead — it appeared in
// no match rule, format, or encoding — so it has been dropped.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13731 
13732 // src1 * src2 - src3
// src1 * src2 - src3
// Fused multiply with negated addend, double.
// Note: the former trailing `immD0 zero` operand was dead — it appeared in
// no match rule, format, or encoding — so it has been dropped.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
  // n.b. insn name should be fnmsubd
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13749 
13750 
// Math.max(FF)F
// Maps directly to the AArch64 fmaxs instruction.
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxF src1 src2));

  format %{ "fmaxs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.min(FF)F
// Maps directly to the AArch64 fmins instruction.
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinF src1 src2));

  format %{ "fmins   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.max(DD)D
// Maps directly to the AArch64 fmaxd instruction.
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MaxD src1 src2));

  format %{ "fmaxd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Math.min(DD)D
// Maps directly to the AArch64 fmind instruction.
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MinD src1 src2));

  format %{ "fmind   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13806 
13807 
// Float division: dst = src1 / src2, via fdivs.
// High ins_cost reflects the long latency of hardware divide.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// Double division: dst = src1 / src2, via fdivd.
// Double divide is costed higher than float divide.
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
13837 
// Float negation via fnegs.
// Fix: format previously read "fneg", but the encoding emits fnegs
// (matching negD_reg_reg's "fnegd"); keep the disassembly text accurate.
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
13851 
// Double negation via fnegd.
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13865 
// Integer absolute value: compare against zero then conditionally negate.
// The compare clobbers the flags, hence KILL cr.
instruct absI_reg(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (AbsI src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $src, zr\n\t"
            "cnegw $dst, $src, Assembler::LT\t# int abs"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cnegw(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}

// Long absolute value: same cmp + cneg sequence on 64-bit registers.
instruct absL_reg(iRegLNoSp dst, iRegL src, rFlagsReg cr)
%{
  match(Set dst (AbsL src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmp  $src, zr\n\t"
            "cneg $dst, $src, Assembler::LT\t# long abs"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cneg(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
13899 
// Float absolute value via fabss.
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double absolute value via fabsd.
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13925 
// Double square root via fsqrtd.
// Fix: this double-precision op was scheduled on the single-precision
// divide pipe (fp_div_s) while sqrtF used fp_div_d — the two pipe classes
// were swapped. Scheduling-model only; the emitted code is unchanged.
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_div_d);
%}
13938 
// Float square root via fsqrts.
// Fix: this single-precision op was scheduled on the double-precision
// divide pipe (fp_div_d) while sqrtD used fp_div_s — the two pipe classes
// were swapped. Scheduling-model only; the emitted code is unchanged.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (SqrtF src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_div_s);
%}
13951 
// Math.rint, floor, ceil
// Selects the frint variant from the compile-time rounding-mode constant:
// rint -> frintn (to nearest, ties to even), floor -> frintm (toward -inf),
// ceil -> frintp (toward +inf). $rmode is an immI, so the switch is
// resolved at code-emission time, not at runtime.
instruct roundD_reg(vRegD dst, vRegD src, immI rmode) %{
  match(Set dst (RoundDoubleMode src rmode));
  format %{ "frint  $dst, $src, $rmode" %}
  ins_encode %{
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        __ frintnd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_floor:
        __ frintmd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_ceil:
        __ frintpd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
    }
  %}
  ins_pipe(fp_uop_d);
%}
13974 
13975 // ============================================================================
13976 // Logical Instructions
13977 
13978 // Integer Logical Instructions
13979 
13980 // And Instructions
13981 
13982 
// int AND, register-register: dst = src1 & src2 via andw.
// NOTE(review): the trailing rFlagsReg cr operand is not referenced by the
// match rule and there is no effect() clause — it looks vestigial; confirm
// before removing.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13997 
// int AND with a logical immediate: dst = src1 & src2.
// Fix: format previously read "andsw" (the flag-setting variant) but the
// encoding emits plain andw; the disassembly text now matches the code.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14012 
// Or Instructions

// int OR, register-register, via orrw.
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// int OR with a logical immediate.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// int XOR, register-register, via eorw.
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// int XOR with a logical immediate.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14076 
14077 // Long Logical Instructions
14078 // TODO
14079 
// long AND, register-register, via andr.
// Fix: format comment said "# int" on a 64-bit long operation; now "# long".
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14094 
// long AND with a logical immediate.
// Fix: format comment said "# int" on a 64-bit long operation; now "# long".
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14109 
14110 // Or Instructions
14111 
// long OR, register-register, via orr.
// Fix: format comment said "# int" on a 64-bit long operation; now "# long".
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14126 
// long OR with a logical immediate.
// Fix: format comment said "# int" on a 64-bit long operation; now "# long".
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14141 
14142 // Xor Instructions
14143 
// long XOR, register-register, via eor.
// Fix: format comment said "# int" on a 64-bit long operation; now "# long".
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14158 
// long XOR with a logical immediate.
// Fix: format comment said "# int" on a 64-bit long operation; now "# long".
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14173 
// Sign-extend int to long: sbfm with imms=31 is the sxtw alias.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// Zero-extend int to long: (long)i & 0xFFFFFFFF collapses to a single ubfm
// (the uxtw alias) instead of a sign-extend plus a mask.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Narrow long to int: a 32-bit register move discards the high word.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
14212 
// int -> boolean: dst = (src != 0) ? 1 : 0.
// Uses compare + conditional set, so the flags are clobbered (KILL cr).
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// pointer -> boolean: dst = (src != NULL) ? 1 : 0; 64-bit compare variant.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
14248 
// double -> float precision conversion (fcvt, double source form).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// float -> double precision conversion (fcvt, single source form).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// float -> int: convert toward zero into a 32-bit GPR (fcvtzs, w form).
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// float -> long: convert toward zero into a 64-bit GPR (fcvtzs, x form).
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}
14300 
// int -> float: signed convert from 32-bit GPR (scvtf, w-to-s form).
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// long -> float: signed convert from 64-bit GPR (scvtf, x-to-s form).
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// double -> int: convert toward zero into a 32-bit GPR (fcvtzs, w form).
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// double -> long: convert toward zero into a 64-bit GPR (fcvtzs, x form).
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// int -> double: signed convert from 32-bit GPR (scvtf, w-to-d form).
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// long -> double: signed convert from 64-bit GPR (scvtf, x-to-d form).
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
14378 
// stack <-> reg and reg <-> reg shuffles with no conversion
// These reinterpret the raw bits (Float.floatToRawIntBits and friends);
// no value conversion is performed.

// Load a spilled float's bits from the stack into a 32-bit GPR.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load a spilled int's bits from the stack into a float register.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Load a spilled double's bits from the stack into a 64-bit GPR.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load a spilled long's bits from the stack into a double register.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14452 
// Store a float register's bits to an int stack slot.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store an int register's bits to a float stack slot.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
14488 
// Store a double register's bits to a long stack slot.
// Fix: format read "strd $dst, $src", reversing the operands relative to
// the encoding (which stores $src to the $dst slot) and to the sibling
// MoveF2I_reg_stack/MoveL2D_reg_stack formats. Debug text only.
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14506 
// Store a long register's bits to a double stack slot.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
14524 
// Bit-copy a float register into a 32-bit GPR (fmov, no memory round-trip).
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// Bit-copy a 32-bit GPR into a float register.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// Bit-copy a double register into a 64-bit GPR.
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// Bit-copy a 64-bit GPR into a double register.
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
14596 
14597 // ============================================================================
14598 // clearing of an array
14599 
14600 instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
14601 %{
14602   match(Set dummy (ClearArray cnt base));
14603   effect(USE_KILL cnt, USE_KILL base, KILL cr);
14604 
14605   ins_cost(4 * INSN_COST);
14606   format %{ "ClearArray $cnt, $base" %}
14607 
14608   ins_encode %{
14609     __ zero_words($base$$Register, $cnt$$Register);
14610   %}
14611 
14612   ins_pipe(pipe_class_memory);
14613 %}
14614 
14615 instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
14616 %{
14617   predicate((uint64_t)n->in(2)->get_long()
14618             < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
14619   match(Set dummy (ClearArray cnt base));
14620   effect(USE_KILL base);
14621 
14622   ins_cost(4 * INSN_COST);
14623   format %{ "ClearArray $cnt, $base" %}
14624 
14625   ins_encode %{
14626     __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
14627   %}
14628 
14629   ins_pipe(pipe_class_memory);
14630 %}
14631 
14632 // ============================================================================
14633 // Overflow Math Instructions
14634 
14635 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
14636 %{
14637   match(Set cr (OverflowAddI op1 op2));
14638 
14639   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
14640   ins_cost(INSN_COST);
14641   ins_encode %{
14642     __ cmnw($op1$$Register, $op2$$Register);
14643   %}
14644 
14645   ins_pipe(icmp_reg_reg);
14646 %}
14647 
14648 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
14649 %{
14650   match(Set cr (OverflowAddI op1 op2));
14651 
14652   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
14653   ins_cost(INSN_COST);
14654   ins_encode %{
14655     __ cmnw($op1$$Register, $op2$$constant);
14656   %}
14657 
14658   ins_pipe(icmp_reg_imm);
14659 %}
14660 
14661 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
14662 %{
14663   match(Set cr (OverflowAddL op1 op2));
14664 
14665   format %{ "cmn   $op1, $op2\t# overflow check long" %}
14666   ins_cost(INSN_COST);
14667   ins_encode %{
14668     __ cmn($op1$$Register, $op2$$Register);
14669   %}
14670 
14671   ins_pipe(icmp_reg_reg);
14672 %}
14673 
14674 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
14675 %{
14676   match(Set cr (OverflowAddL op1 op2));
14677 
14678   format %{ "cmn   $op1, $op2\t# overflow check long" %}
14679   ins_cost(INSN_COST);
14680   ins_encode %{
14681     __ cmn($op1$$Register, $op2$$constant);
14682   %}
14683 
14684   ins_pipe(icmp_reg_imm);
14685 %}
14686 
// int subtract overflow check, register - register (cmp sets V on overflow).
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// int subtract overflow check, register - immediate.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// long subtract overflow check, register - register.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// long subtract overflow check, register - immediate.
// Emits subs with the zero register as destination, i.e. cmp with an
// add/sub-encodable immediate.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ subs(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
14738 
// int negate overflow check: 0 - op1 as a compare against zr
// (overflows only for Integer.MIN_VALUE).
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// long negate overflow check: 0 - op1 as a compare against zr.
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
14764 
// int multiply overflow check, result consumed via flags.
// Compute the full 64-bit product, then check that it equals its own
// 32-bit sign-extension; if not, synthesize the V flag by comparing
// 0x80000000 with 1 so downstream VS/VC users see "overflow".
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// int multiply overflow check feeding a branch directly.
// Restricted (by predicate) to overflow/no_overflow tests, so the
// EQ/NE result of the sign-extension compare can be branched on without
// materializing the V flag as in overflowMulI_reg.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    // VS (overflow requested) maps to NE of the extension compare; VC to EQ.
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
14807 
14808 instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
14809 %{
14810   match(Set cr (OverflowMulL op1 op2));
14811 
14812   format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
14813             "smulh rscratch2, $op1, $op2\n\t"
14814             "cmp   rscratch2, rscratch1, ASR #63\n\t"
14815             "movw  rscratch1, #0x80000000\n\t"
14816             "cselw rscratch1, rscratch1, zr, NE\n\t"
14817             "cmpw  rscratch1, #1" %}
14818   ins_cost(6 * INSN_COST);
14819   ins_encode %{
14820     __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
14821     __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
14822     __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
14823     __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
14824     __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
14825     __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
14826   %}
14827 
14828   ins_pipe(pipe_slow);
14829 %}
14830 
14831 instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
14832 %{
14833   match(If cmp (OverflowMulL op1 op2));
14834   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
14835             || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
14836   effect(USE labl, KILL cr);
14837 
14838   format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
14839             "smulh rscratch2, $op1, $op2\n\t"
14840             "cmp   rscratch2, rscratch1, ASR #63\n\t"
14841             "b$cmp $labl" %}
14842   ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
14843   ins_encode %{
14844     Label* L = $labl$$label;
14845     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
14846     __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
14847     __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
14848     __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
14849     __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
14850   %}
14851 
14852   ins_pipe(pipe_serial);
14853 %}
14854 
14855 // ============================================================================
14856 // Compare Instructions
14857 
// Signed int compare, register vs register.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed int compare against zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an immediate encodable in an add/sub
// instruction: a single cmpw.
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an arbitrary immediate.  Costs two insns
// because the constant must first be materialized in a scratch register.
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14913 
14914 // Unsigned compare Instructions; really, same as signed compare
14915 // except it should only be used to feed an If or a CMovI which takes a
14916 // cmpOpU.
14917 
// Unsigned int compare, register vs register.  The instruction is the same
// cmpw as the signed form; only the flags interpretation (rFlagsRegU /
// cmpOpU) differs.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned int compare against zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an arbitrary immediate (constant must be
// materialized first, hence the doubled cost).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14973 
// Signed long compare, register vs register.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed long compare against zero.
// NOTE(review): the format prints "tst" but the encoding is a compare with
// #0 (aarch64_enc_cmp_imm_addsub); the assembly comment is cosmetic only.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an arbitrary immediate (constant must be
// materialized first, hence the doubled cost).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15029 
// Unsigned long compare, register vs register.  Same encodings as the
// signed CmpL rules; the unsigned interpretation lives in rFlagsRegU.
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned long compare against zero.
// NOTE(review): as with compL_reg_immL0, the "tst" format text is cosmetic;
// the encoding is a compare with #0.
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an add/sub-encodable immediate.
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an arbitrary immediate.
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15085 
// Pointer compare, register vs register (unsigned flags).
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed-pointer (narrow oop) compare, register vs register.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null test: compare op1 against the null pointer constant.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Narrow-oop null test: compare op1 against the narrow null constant.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
15141 
15142 // FP comparisons
15143 //
15144 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
15145 // using normal cmpOp. See declaration of rFlagsReg for details.
15146 
// Float compare, register vs register; fcmps sets the normal integer flags.
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Float compare against the constant 0.0 (uses the fcmp-with-zero form).
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}

// Double-precision variants of the float compares above.

// Double compare, register vs register.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Double compare against the constant 0.0.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
15203 
// Three-way float compare (CmpF3): dst = -1 if src1 < src2 or unordered,
// 0 if equal, +1 if greater.  csinv gives 0 (EQ) or -1; csneg then keeps
// -1 when LT (which also holds for unordered after fcmp) or negates to +1.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // NOTE(review): 'done' is declared and bound but never branched to;
    // harmless leftover from an earlier branching implementation.
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}

// Three-way double compare (CmpD3); same -1/0/+1 scheme as compF3_reg_reg.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // NOTE(review): 'done' is bound but never branched to (see compF3_reg_reg).
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}

// Three-way float compare against 0.0.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // NOTE(review): 'done' is bound but never branched to (see compF3_reg_reg).
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}

// Three-way double compare against 0.0.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // NOTE(review): 'done' is bound but never branched to (see compF3_reg_reg).
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}
15311 
// CmpLTMask: dst = (p < q) ? -1 : 0.  csetw yields 1 when LT, then
// subtracting from zr turns that 1 into -1 (and 0 stays 0).
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask against zero: a single arithmetic shift right by 31 replicates
// the sign bit, giving -1 for negative src and 0 otherwise.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
15348 
15349 // ============================================================================
15350 // Max and Min
15351 
// Helper for MinI: conditional select on LT.  No match rule -- this is
// only reachable through the expand of minI_rReg below.
instruct cmovI_reg_reg_lt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect( DEF dst, USE src1, USE src2, USE cr );

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 lt\t"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(icond_reg_reg);
%}

// MinI: expands to a compare followed by a select-on-LT (dst = min).
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
%{
  match(Set dst (MinI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_lt(dst, src1, src2, cr);
  %}

%}

// Helper for MaxI: conditional select on GT; used only by the expand of
// maxI_rReg below.
instruct cmovI_reg_reg_gt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect( DEF dst, USE src1, USE src2, USE cr );

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 gt\t"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(icond_reg_reg);
%}

// MaxI: expands to a compare followed by a select-on-GT (dst = max).
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
%{
  match(Set dst (MaxI src1 src2));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_gt(dst, src1, src2, cr);
  %}
%}
15410 
15411 // ============================================================================
15412 // Branch Instructions
15413 
15414 // Direct Branch.
// Unconditional direct branch (Goto).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
15428 
15429 // Conditional Near Branch
// Conditional near branch on signed condition codes.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}

// Conditional near branch on unsigned condition codes.
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
15470 
15471 // Make use of CBZ and CBNZ.  These instructions, as well as being
15472 // shorter than (cmp; branch), have the additional benefit of not
15473 // killing the flags.
15474 
// Int eq/ne zero-compare-and-branch: fused into cbzw/cbnzw, leaving the
// flags untouched (hence no KILL cr).
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Long eq/ne zero-compare-and-branch: cbz/cbnz.
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Pointer eq/ne null-compare-and-branch: cbz/cbnz on the full register.
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Narrow-oop eq/ne null-compare-and-branch: cbzw/cbnzw on the 32-bit view.
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Null check of (DecodeN oop): the decoded pointer is null iff the narrow
// oop is zero, so test the narrow register directly and skip the decode.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned int compare with zero + branch.  For unsigned x: x <= 0 iff
// x == 0 and x > 0 iff x != 0, so eq/ne/lt/ge tests (the cmpOpUEqNeLtGe
// operand) all reduce to cbzw/cbnzw.
// NOTE(review): the EQ/LS grouping relies on the condition codes the
// cmpOpUEqNeLtGe operand emits -- confirm against its declaration.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned long compare with zero + branch; same reduction as cmpUI above.
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15593 
15594 // Test bit and Branch
15595 
15596 // Patterns for short (< 32KiB) variants
// Long sign test + branch: (x < 0) / (x >= 0) is just bit 63, so emit a
// test-bit branch.  LT maps to NE (branch if sign bit set), GE to EQ.
// tbr is the masm helper that selects tbnz (NE) or tbz (EQ).
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Int sign test + branch: test bit 31.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Long single-bit test + branch: (x & (1 << bit)) ==/!= 0 becomes tbz/tbnz.
// The predicate restricts the mask to a power of two.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Int single-bit test + branch; mask must be a power of two.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
15662 
15663 // And far variants
// Far variant of cmpL_branch_sign: same bit-63 test, but tbr is told to
// emit the far form (tbz/tbnz has only a +/-32KiB range).
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of cmpI_branch_sign (bit 31).
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of cmpL_branch_bit (power-of-two mask).
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of cmpI_branch_bit (power-of-two mask).
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15725 
15726 // Test bits
15727 
// Flags from (op1 & op2) == 0 for a long logical-immediate mask: a single
// tst.  The predicate ensures the mask is encodable as a logical immediate.
// NOTE(review): the cmp operand is declared but unused by match/encode in
// all four rules below -- presumably needed by the matcher; confirm.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Flags from (op1 & op2) == 0 for an int logical-immediate mask: tstw.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Flags from (op1 & op2) == 0, long register mask.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Flags from (op1 & op2) == 0, int register mask.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
15775 
15776 
15777 // Conditional Far Branch
15778 // Conditional Far Branch Unsigned
15779 // TODO: fixme
15780 
15781 // counted loop end branch near
// counted loop end branch near
// Conditional back-branch at the end of a counted loop, using signed
// condition codes; shares the generic conditional-branch encoding.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}

// counted loop end branch near Unsigned
// Same as above but for unsigned condition codes (cmpOpU/rFlagsRegU).
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
15814 
15815 // counted loop end branch far
15816 // counted loop end branch far unsigned
15817 // TODO: fixme
15818 
15819 // ============================================================================
15820 // inlined locking and unlocking
15821 
// Inlined fast-path monitor enter for FastLock; the slow path is taken
// via the flags result.  tmp and tmp2 are scratch (TEMP) registers.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}

// Inlined fast-path monitor exit, mirroring cmpFastLock above.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
15849 
15850 
15851 // ============================================================================
15852 // Safepoint Instructions
15853 
15854 // TODO
15855 // provide a near and far version of this code
15856 
// Safepoint poll: read from the polling page.  When the VM protects the
// page the load faults and the thread is diverted to the safepoint
// handler (relocInfo::poll_type identifies the poll site).
instruct safePoint(rFlagsReg cr, iRegP poll)
%{
  match(SafePoint poll);
  effect(KILL cr);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
15870 
15871 
15872 // ============================================================================
15873 // Procedure Call/Return Instructions
15874 
15875 // Call Java Static Instruction
15876 
// Direct (statically bound) Java call; the call epilog encoding is
// appended after the call itself.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15892 
15893 // TO HERE
15894 
15895 // Call Java Dynamic Instruction
// Call Java Dynamic Instruction
// Dynamically dispatched (inline-cache) Java call, followed by the
// shared call epilog encoding.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15911 
15912 // Call Runtime Instruction
15913 
// Call into the VM runtime; all three runtime-call variants below share
// the same Java-to-runtime call encoding and differ only in the ideal
// node they match (CallRuntime / CallLeaf / CallLeafNoFP).
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Leaf Instruction (no safepoint / no oop-map expected by
// the callee; see CallLeaf ideal node).
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Leaf No-FP Instruction (CallLeafNoFP variant).
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15962 
15963 // Tail Call; Jump from runtime stub to Java code.
15964 // Also known as an 'interprocedural jump'.
15965 // Target of jump will eventually return to caller.
15966 // TailJump below removes the return address.
// Interprocedural jump from a runtime stub into Java code; the target
// method pointer travels in the inline-cache register.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_ptr)
%{
  match(TailCall jump_target method_ptr);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_ptr holds method" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}

// Tail jump used for exception forwarding: jumps to the target with the
// exception oop pinned in r0 (iRegP_R0).
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
15992 
15993 // Create exception oop: created by stack-crawling runtime code.
15994 // Created exception is now available to this handler, and is setup
15995 // just prior to jumping to this handler. No code emitted.
15996 // TODO check
15997 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Materializes the exception oop for an exception handler.  The oop is
// already in r0 when control arrives here, so the instruction is
// zero-size and emits nothing.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}

// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
16023 
16024 
16025 // Return Instruction
16026 // epilog node loads ret address into lr as part of frame pop
// Return Instruction
// epilog node loads ret address into lr as part of frame pop
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}

// Die now.
// Halt: emits a stop with the node's halt reason, but only when the
// block is reachable (is_reachable() guard avoids dead-code emission).
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    if (is_reachable()) {
      __ stop(_halt_reason);
    }
  %}

  ins_pipe(pipe_class_default);
%}
16053 
16054 // ============================================================================
16055 // Partial Subtype Check
16056 //
// Scan the subklass's secondary-supers (superklass) array for an
// instance of the superklass.  Set a hidden
16058 // internal cache on a hit (cache is checked with exposed code in
16059 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
16060 // encoding ALSO sets flags.
16061 
// Partial subtype check producing a result register; opcode(0x1)
// requests that the result register be zeroed on a hit.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}

// Variant matched when the check result is only compared against zero;
// the flags output is used directly and the result register need not be
// zeroed on a hit (opcode 0x0).
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
16091 
// Lexicographic comparison of two UTF-16 (UU encoding) strings; the
// signed comparison result lands in r0.  All string/count inputs are
// consumed (USE_KILL) and both temps plus the flags are clobbered.
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  // List both temps: tmp2 is clobbered as well (see effect above).
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16109 
// Lexicographic comparison of two Latin-1 (LL encoding) strings; same
// register plan and clobbers as string_compareU above.
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  // List both temps: tmp2 is clobbered as well (see effect above).
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16126 
// Mixed-encoding comparison: str1 is UTF-16, str2 is Latin-1 (UL).
// Uses three vector temps (v0-v2) in addition to the integer temps.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16146 
// Mixed-encoding comparison: str1 is Latin-1, str2 is UTF-16 (LU).
// Mirror image of string_compareUL above; same temps and clobbers.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
16166 
// StrIndexOf with a variable pattern length: the three variants below
// differ only in the encoding selected by the predicate (UU / LL / UL).
// The -1 passed as icnt2 tells string_indexof the constant pattern
// length is unknown at compile time.
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// Latin-1 source and pattern (LL).
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Mixed encoding (UL).
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16229 
// StrIndexOf with a compile-time-constant pattern length: fewer temps
// are needed, and the constant count is passed as icnt2 instead of a
// register (zr is substituted for the unused count/temp registers).
// UU/LL accept counts up to 4 (immI_le_4); UL only count == 1 (immI_1).
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// Constant-count variant, Latin-1 (LL), count <= 4.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Constant-count variant, mixed encoding (UL); note the immI_1 operand
// restricts this rule to a single-element pattern.
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16292 
// Index-of for a single char searched in a UTF-16 char[]; delegates to
// the MacroAssembler string_indexof_char stub helper.
instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16310 
// String equality, Latin-1 (LL): element size 1 passed to string_equals.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}

// String equality, UTF-16 (UU): element size 2 passed to string_equals.
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
16342 
// byte[] equality (LL encoding); element size 1 passed to arrays_equals.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  // "$ary2" (with the operand sigil) so the second operand is
  // substituted in the printed format rather than the literal "ary2".
  format %{ "Array Equals $ary1,$ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
16359 
// char[] equality (UU encoding); element size 2 passed to arrays_equals.
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  // "$ary2" (with the operand sigil) so the second operand is
  // substituted in the printed format rather than the literal "ary2".
  format %{ "Array Equals $ary1,$ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
16376 
// Scan a byte[] for any element with the sign bit set (HasNegatives
// intrinsic); delegates to the MacroAssembler has_negatives helper.
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (HasNegatives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "has negatives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
16387 
16388 // fast char[] to byte[] compression
// fast char[] to byte[] compression
// (StrCompressedCopy intrinsic); uses four vector temps v0-v3 and
// delegates to MacroAssembler::char_array_compress.
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 tmp1, vRegD_V1 tmp2,
                         vRegD_V2 tmp3, vRegD_V3 tmp4,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Compress $src,$dst -> $result    // KILL R1, R2, R3, R4" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                           $tmp3$$FloatRegister, $tmp4$$FloatRegister,
                           $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
16406 
16407 // fast byte[] to char[] inflation
// fast byte[] to char[] inflation
// (StrInflatedCopy intrinsic); produces no value (Universe dummy) and
// delegates to MacroAssembler::byte_array_inflate.
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
                        vRegD_V0 tmp1, vRegD_V1 tmp2, vRegD_V2 tmp3, iRegP_R3 tmp4, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                          $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16421 
16422 // encode char[] to byte[] in ISO_8859_1
// encode char[] to byte[] in ISO_8859_1
// (EncodeISOArray intrinsic); four vector temps, result in r0.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
16440 
16441 // ============================================================================
16442 // This name is KNOWN by the ADLC and cannot be changed.
16443 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
16444 // for this guy.
// ThreadLocal: the current thread already lives in the dedicated thread
// register (thread_RegP), so this is a zero-size, no-code instruction.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
16459 
16460 // ====================VECTOR INSTRUCTIONS=====================================
16461 
16462 // Load vector (32 bits)
// Load vector (32 bits) into the low half of a D register.
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load vector (64 bits).
instruct loadV8(vecD dst, vmem8 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load Vector (128 bits).  Gated on UseSVE == 0; 128-bit loads are
// presumably handled by separate SVE rules when SVE is enabled — see
// the SVE ad file (NOTE(review): confirm against aarch64_sve.ad).
instruct loadV16(vecX dst, vmem16 mem)
%{
  predicate(UseSVE == 0 && n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}

// Store Vector (32 bits).
instruct storeV4(vecD src, vmem4 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (64 bits).
instruct storeV8(vecD src, vmem8 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (128 bits).  NOTE(review): unlike loadV16 this predicate
// has no UseSVE == 0 guard — verify that is intentional.
instruct storeV16(vecX src, vmem16 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
16527 
16528 instruct replicate8B(vecD dst, iRegIorL2I src)
16529 %{
16530   predicate(n->as_Vector()->length() == 4 ||
16531             n->as_Vector()->length() == 8);
16532   match(Set dst (ReplicateB src));
16533   ins_cost(INSN_COST);
16534   format %{ "dup  $dst, $src\t# vector (8B)" %}
16535   ins_encode %{
16536     __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
16537   %}
16538   ins_pipe(vdup_reg_reg64);
16539 %}
16540 
16541 instruct replicate16B(vecX dst, iRegIorL2I src)
16542 %{
16543   predicate(UseSVE == 0 && n->as_Vector()->length() == 16);
16544   match(Set dst (ReplicateB src));
16545   ins_cost(INSN_COST);
16546   format %{ "dup  $dst, $src\t# vector (16B)" %}
16547   ins_encode %{
16548     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
16549   %}
16550   ins_pipe(vdup_reg_reg128);
16551 %}
16552 
16553 instruct replicate8B_imm(vecD dst, immI con)
16554 %{
16555   predicate(n->as_Vector()->length() == 4 ||
16556             n->as_Vector()->length() == 8);
16557   match(Set dst (ReplicateB con));
16558   ins_cost(INSN_COST);
16559   format %{ "movi  $dst, $con\t# vector(8B)" %}
16560   ins_encode %{
16561     __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
16562   %}
16563   ins_pipe(vmovi_reg_imm64);
16564 %}
16565 
16566 instruct replicate16B_imm(vecX dst, immI con)
16567 %{
16568   predicate(UseSVE == 0 && n->as_Vector()->length() == 16);
16569   match(Set dst (ReplicateB con));
16570   ins_cost(INSN_COST);
16571   format %{ "movi  $dst, $con\t# vector(16B)" %}
16572   ins_encode %{
16573     __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
16574   %}
16575   ins_pipe(vmovi_reg_imm128);
16576 %}
16577 
// Replicate a short (16-bit) value held in a GPR into a 64-bit vector
// (covers 2S and 4S vectors; arrangement T4H = four 16-bit lanes).
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// Replicate a short value into all 8 lanes of a 128-bit vector (NEON-only path).
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(UseSVE == 0 && n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Replicate an immediate short into a 64-bit vector (covers 2S and 4S vectors).
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    // Mask to the low 16 bits: only the short value is broadcast.
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// Replicate an immediate short into all 8 lanes of a 128-bit vector (NEON-only path).
instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(UseSVE == 0 && n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    // Mask to the low 16 bits: only the short value is broadcast.
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
16627 
// Replicate an int value held in a GPR into both 32-bit lanes of a 64-bit vector.
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// Replicate an int value into all 4 lanes of a 128-bit vector (NEON-only path).
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(UseSVE == 0 && n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Replicate an immediate int into both 32-bit lanes of a 64-bit vector.
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// Replicate an immediate int into all 4 lanes of a 128-bit vector (NEON-only path).
instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(UseSVE == 0 && n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

// Replicate a long value held in a GPR into both 64-bit lanes of a 128-bit vector
// (NEON-only path).
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(UseSVE == 0 && n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16687 
// Special case: replicate zero into a 128-bit vector. Matches ReplicateI of the
// constant 0 (the matcher canonicalizes a zero long replicate this way) and
// clears the whole register with EOR dst, dst, dst instead of loading the
// immediate. Fixed format string: the previous "movi ... vector(4I)" text did
// not match the EOR actually emitted, which made PrintOptoAssembly output
// misleading. NEON-only path (UseSVE == 0).
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(UseSVE == 0 && n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "eor  $dst, T16B, $dst, $dst\t# replicate2L zero" %}
  ins_encode %{
    // dst EOR dst == 0 regardless of dst's prior contents.
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
16701 
// Replicate a float held in an FP register into both 32-bit lanes of a 64-bit vector.
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}

// Replicate a float into all 4 lanes of a 128-bit vector (NEON-only path).
instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(UseSVE == 0 && n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}

// Replicate a double into both 64-bit lanes of a 128-bit vector (NEON-only path).
instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(UseSVE == 0 && n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
16740 
16741 // ====================REDUCTION ARITHMETIC====================================
16742 
// Add-reduction of a 2-int vector plus a scalar accumulator:
// dst = isrc + vsrc[0] + vsrc[1]. Both lanes are moved to GPRs with umov
// and summed with 32-bit adds.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, iRegINoSp tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $vsrc, S, 0\n\t"
            "umov  $tmp2, $vsrc, S, 1\n\t"
            "addw  $tmp, $isrc, $tmp\n\t"
            "addw  $dst, $tmp, $tmp2\t# add reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($vsrc$$reg), __ S, 1);
    __ addw($tmp$$Register, $isrc$$Register, $tmp$$Register);
    __ addw($dst$$Register, $tmp$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Add-reduction of a 4-int vector plus a scalar accumulator:
// addv sums all four lanes across the vector, then the scalar is added in a GPR.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, vecX vtmp, iRegINoSp itmp)
%{
  match(Set dst (AddReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP vtmp, TEMP itmp);
  format %{ "addv  $vtmp, T4S, $vsrc\n\t"
            "umov  $itmp, $vtmp, S, 0\n\t"
            "addw  $dst, $itmp, $isrc\t# add reduction4I"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($vtmp$$reg), __ T4S,
            as_FloatRegister($vsrc$$reg));
    __ umov($itmp$$Register, as_FloatRegister($vtmp$$reg), __ S, 0);
    __ addw($dst$$Register, $itmp$$Register, $isrc$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16779 
// Mul-reduction of a 2-int vector times a scalar accumulator:
// dst = isrc * vsrc[0] * vsrc[1], computed lane-by-lane in GPRs.
// dst is a TEMP as well as the result because it is overwritten mid-sequence.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, iRegINoSp tmp)
%{
  match(Set dst (MulReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $vsrc, S, 0\n\t"
            "mul   $dst, $tmp, $isrc\n\t"
            "umov  $tmp, $vsrc, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t# mul reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $isrc$$Register);
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Mul-reduction of a 4-int vector times a scalar accumulator. First halves the
// problem in vector registers: ins copies the high 64 bits of vsrc into vtmp,
// mulv multiplies the two halves pairwise (lanes {0*2, 1*3}), then the two
// remaining partial products are combined with the scalar in GPRs.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, vecX vtmp, iRegINoSp itmp)
%{
  match(Set dst (MulReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP vtmp, TEMP itmp, TEMP dst);
  format %{ "ins   $vtmp, D, $vsrc, 0, 1\n\t"
            "mulv  $vtmp, T2S, $vtmp, $vsrc\n\t"
            "umov  $itmp, $vtmp, S, 0\n\t"
            "mul   $dst, $itmp, $isrc\n\t"
            "umov  $itmp, $vtmp, S, 1\n\t"
            "mul   $dst, $itmp, $dst\t# mul reduction4I"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($vtmp$$reg), __ D,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp$$reg), __ T2S,
            as_FloatRegister($vtmp$$reg), as_FloatRegister($vsrc$$reg));
    __ umov($itmp$$Register, as_FloatRegister($vtmp$$reg), __ S, 0);
    __ mul($dst$$Register, $itmp$$Register, $isrc$$Register);
    __ umov($itmp$$Register, as_FloatRegister($vtmp$$reg), __ S, 1);
    __ mul($dst$$Register, $itmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16823 
// Add-reduction of a 2-float vector plus a scalar accumulator:
// dst = (fsrc + vsrc[0]) + vsrc[1]. Lanes are accumulated strictly in order
// (scalar fadds chain) to preserve Java's required FP evaluation order;
// a single cross-lane add would change rounding.
instruct reduce_add2F(vRegF dst, vRegF fsrc, vecD vsrc, vecD tmp)
%{
  match(Set dst (AddReductionVF fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t# add reduction2F"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    // Move lane 1 into lane 0 of tmp so the scalar fadds can reach it.
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Add-reduction of a 4-float vector plus a scalar accumulator. Same strict
// in-order accumulation as reduce_add2F, extended over lanes 1..3.
instruct reduce_add4F(vRegF dst, vRegF fsrc, vecX vsrc, vecX tmp)
%{
  match(Set dst (AddReductionVF fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $vsrc, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $vsrc, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t# add reduction4F"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16875 
// Mul-reduction of a 2-float vector times a scalar accumulator:
// dst = (fsrc * vsrc[0]) * vsrc[1], multiplied strictly in lane order to
// preserve FP rounding semantics.
instruct reduce_mul2F(vRegF dst, vRegF fsrc, vecD vsrc, vecD tmp)
%{
  match(Set dst (MulReductionVF fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t# mul reduction2F"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    // Move lane 1 into lane 0 of tmp so the scalar fmuls can reach it.
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Mul-reduction of a 4-float vector times a scalar accumulator; same strict
// in-order lane chain as reduce_mul2F, extended over lanes 1..3.
instruct reduce_mul4F(vRegF dst, vRegF fsrc, vecX vsrc, vecX tmp)
%{
  match(Set dst (MulReductionVF fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $vsrc, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $vsrc, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t# mul reduction4F"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16927 
// Add-reduction of a 2-double vector plus a scalar accumulator:
// dst = (dsrc + vsrc[0]) + vsrc[1], accumulated strictly in lane order.
instruct reduce_add2D(vRegD dst, vRegD dsrc, vecX vsrc, vecX tmp)
%{
  match(Set dst (AddReductionVD dsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $dsrc, $vsrc\n\t"
            "ins   $tmp, D, $vsrc, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t# add reduction2D"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dsrc$$reg), as_FloatRegister($vsrc$$reg));
    // Move lane 1 into lane 0 of tmp so the scalar faddd can reach it.
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Mul-reduction of a 2-double vector times a scalar accumulator:
// dst = (dsrc * vsrc[0]) * vsrc[1], multiplied strictly in lane order.
instruct reduce_mul2D(vRegD dst, vRegD dsrc, vecX vsrc, vecX tmp)
%{
  match(Set dst (MulReductionVD dsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $dsrc, $vsrc\n\t"
            "ins   $tmp, D, $vsrc, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t# mul reduction2D"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16967 
// Max-reduction of a 2-float vector with a scalar: dst = max(max(fsrc, vsrc[0]), vsrc[1]).
// Predicate pins the element type since MaxReductionV is type-generic.
instruct reduce_max2F(vRegF dst, vRegF fsrc, vecD vsrc, vecD tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxReductionV fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmaxs $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fmaxs $dst, $dst, $tmp\t# max reduction2F" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    // Move lane 1 into lane 0 of tmp so the scalar fmaxs can reach it.
    __ ins(as_FloatRegister($tmp$$reg), __ S, as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Max-reduction of a 4-float vector with a scalar: fmaxv finds the across-lane
// maximum in one instruction, then the scalar is folded in.
instruct reduce_max4F(vRegF dst, vRegF fsrc, vecX vsrc) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxReductionV fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "fmaxv $dst, T4S, $vsrc\n\t"
            "fmaxs $dst, $dst, $fsrc\t# max reduction4F" %}
  ins_encode %{
    __ fmaxv(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($vsrc$$reg));
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($fsrc$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Max-reduction of a 2-double vector with a scalar (no fmaxv for 2D, so the
// second lane is extracted and compared with scalar fmaxd).
instruct reduce_max2D(vRegD dst, vRegD dsrc, vecX vsrc, vecX tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MaxReductionV dsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmaxd $dst, $dsrc, $vsrc\n\t"
            "ins   $tmp, D, $vsrc, 0, 1\n\t"
            "fmaxd $dst, $dst, $tmp\t# max reduction2D" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg), as_FloatRegister($dsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D, as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmaxd(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
17013 
// Min-reduction of a 2-float vector with a scalar: dst = min(min(fsrc, vsrc[0]), vsrc[1]).
// Predicate pins the element type since MinReductionV is type-generic.
instruct reduce_min2F(vRegF dst, vRegF fsrc, vecD vsrc, vecD tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinReductionV fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmins $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fmins $dst, $dst, $tmp\t# min reduction2F" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    // Move lane 1 into lane 0 of tmp so the scalar fmins can reach it.
    __ ins(as_FloatRegister($tmp$$reg), __ S, as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Min-reduction of a 4-float vector with a scalar: fminv finds the across-lane
// minimum in one instruction, then the scalar is folded in.
instruct reduce_min4F(vRegF dst, vRegF fsrc, vecX vsrc) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinReductionV fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "fminv $dst, T4S, $vsrc\n\t"
            "fmins $dst, $dst, $fsrc\t# min reduction4F" %}
  ins_encode %{
    __ fminv(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($vsrc$$reg));
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($fsrc$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Min-reduction of a 2-double vector with a scalar (no fminv for 2D, so the
// second lane is extracted and compared with scalar fmind).
instruct reduce_min2D(vRegD dst, vRegD dsrc, vecX vsrc, vecX tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MinReductionV dsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmind $dst, $dsrc, $vsrc\n\t"
            "ins   $tmp, D, $vsrc, 0, 1\n\t"
            "fmind $dst, $dst, $tmp\t# min reduction2D" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg), as_FloatRegister($dsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D, as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmind(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
17059 
17060 // ====================VECTOR ARITHMETIC=======================================
17061 
17062 // --------------------------------- ADD --------------------------------------
17063 
// Lanewise integer vector add, 64-bit vector of bytes (covers 4B and 8B).
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Lanewise integer vector add, 128-bit vector of 16 bytes.
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Lanewise vector add of shorts, 64-bit vector (covers 2S and 4S).
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Lanewise vector add of 8 shorts, 128-bit vector.
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Lanewise vector add of 2 ints, 64-bit vector.
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Lanewise vector add of 4 ints, 128-bit vector.
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Lanewise vector add of 2 longs, 128-bit vector.
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
17163 
// Lanewise FP vector add of 2 floats, 64-bit vector.
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// Lanewise FP vector add of 4 floats, 128-bit vector.
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17191 
// Lanewise FP vector add of 2 doubles, 128-bit vector.
// Added the length-2 predicate for consistency with the sibling 2D rules
// (vsub2D, vmul2D); a vecX can only hold two doubles, so this does not
// exclude any case the rule previously matched.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17204 
17205 // --------------------------------- SUB --------------------------------------
17206 
// Lanewise integer vector subtract, 64-bit vector of bytes (covers 4B and 8B).
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Lanewise integer vector subtract, 128-bit vector of 16 bytes.
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Lanewise vector subtract of shorts, 64-bit vector (covers 2S and 4S).
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Lanewise vector subtract of 8 shorts, 128-bit vector.
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Lanewise vector subtract of 2 ints, 64-bit vector.
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Lanewise vector subtract of 4 ints, 128-bit vector.
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Lanewise vector subtract of 2 longs, 128-bit vector.
instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Lanewise FP vector subtract of 2 floats, 64-bit vector.
instruct vsub2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// Lanewise FP vector subtract of 4 floats, 128-bit vector.
instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

// Lanewise FP vector subtract of 2 doubles, 128-bit vector.
instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17348 
17349 // --------------------------------- MUL --------------------------------------
17350 
// Lanewise integer vector multiply, 64-bit vector of bytes (covers 4B and 8B).
instruct vmul8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (MulVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Lanewise integer vector multiply, 128-bit vector of 16 bytes.
instruct vmul16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (MulVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Lanewise vector multiply of shorts, 64-bit vector (covers 2S and 4S).
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Lanewise vector multiply of 8 shorts, 128-bit vector.
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Lanewise vector multiply of 2 ints, 64-bit vector.
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Lanewise vector multiply of 4 ints, 128-bit vector.
// (Note: no MulVL rule here — NEON has no 64-bit lane integer multiply.)
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Lanewise FP vector multiply of 2 floats, 64-bit vector.
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// Lanewise FP vector multiply of 4 floats, 128-bit vector.
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// Lanewise FP vector multiply of 2 doubles, 128-bit vector.
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17478 
17479 // --------------------------------- MLA --------------------------------------
17480 
// Integer multiply-accumulate: dst += src1 * src2 (mlav).
// Note that dst appears on both sides of the match rule, so it is an
// input as well as the output.

// 2 or 4 short lanes in a 64-bit register (T4H).
instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// 8 short lanes (T8H).
instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// 2 int lanes (T2S).
instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// 4 int lanes (T4S).
instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
17537 
// Fused FP multiply-accumulate: dst = dst + src1 * src2 (fmla).
// Only used when UseFMA is enabled, since fmla does not round the
// intermediate product. 2 float lanes (T2S).
instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst = dst + src1 * src2, 4 float lanes (T4S).
instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst = dst + src1 * src2, 2 double lanes (T2D).
instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17579 
17580 // --------------------------------- MLS --------------------------------------
17581 
// Integer multiply-subtract: dst -= src1 * src2 (mlsv).
// As with MLA, dst is both an input and the output.

// 2 or 4 short lanes in a 64-bit register (T4H).
instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// 8 short lanes (T8H).
instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// 2 int lanes (T2S).
instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// 4 int lanes (T4S).
instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
17638 
// Fused FP multiply-subtract: dst = dst - src1 * src2 (fmls).
// Two match rules cover both ways the ideal graph can express the
// negation: (-src1) * src2 and src1 * (-src2). 2 float lanes (T2S).
instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst = dst - src1 * src2, 4 float lanes (T4S).
instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst = dst - src1 * src2, 2 double lanes (T2D).
instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary (NegVD src1) src2)));
  match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17683 
17684 // --------------- Vector Multiply-Add Shorts into Integer --------------------
17685 
// Multiply-add pairs of shorts into ints: widening signed multiply of
// the low halfwords into $tmp and of the high halfwords into $dst,
// then a pairwise add combines adjacent products into 4 int lanes.
// TEMP_DEF dst because $dst is written before all inputs are consumed.
instruct vmuladdS2I(vecX dst, vecX src1, vecX src2, vecX tmp) %{
  predicate(n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MulAddVS2VI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "smullv  $tmp, $src1, $src2\t# vector (4H)\n\t"
            "smullv  $dst, $src1, $src2\t# vector (8H)\n\t"
            "addpv   $dst, $tmp, $dst\t# vector (4S)\n\t" %}
  ins_encode %{
    // Widening multiply of the low 4 halfword lanes.
    __ smullv(as_FloatRegister($tmp$$reg), __ T4H,
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
    // Widening multiply of the high 4 halfword lanes.
    __ smullv(as_FloatRegister($dst$$reg), __ T8H,
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
    // Pairwise add of adjacent 32-bit products.
    __ addpv(as_FloatRegister($dst$$reg), __ T4S,
             as_FloatRegister($tmp$$reg),
             as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17707 
17708 // --------------------------------- DIV --------------------------------------
17709 
// Vector FP divide, 2 float lanes (fdiv, T2S).
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// Vector FP divide, 4 float lanes (T4S).
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// Vector FP divide, 2 double lanes (T2D).
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17751 
17752 // --------------------------------- SQRT -------------------------------------
17753 
// Vector FP square root, 2 float lanes (fsqrt, T2S).
instruct vsqrt2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVF src));
  format %{ "fsqrt  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// Vector FP square root, 4 float lanes (T4S).
instruct vsqrt4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SqrtVF src));
  format %{ "fsqrt  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}

// Vector FP square root, 2 double lanes (T2D).
instruct vsqrt2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVD src));
  format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}
17787 
17788 // --------------------------------- ABS --------------------------------------
17789 
// Integer vector absolute value (absr).

// 4 or 8 byte lanes in a 64-bit register (T8B).
instruct vabs8B(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AbsVB src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical64);
%}

// 16 byte lanes (T16B).
instruct vabs16B(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AbsVB src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

// 4 short lanes (T4H).
instruct vabs4S(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVS src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (4H)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical64);
%}

// 8 short lanes (T8H).
instruct vabs8S(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AbsVS src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (8H)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

// 2 int lanes (T2S).
instruct vabs2I(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVI src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (2S)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical64);
%}

// 4 int lanes (T4S).
instruct vabs4I(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVI src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

// 2 long lanes (T2D).
instruct vabs2L(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVL src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}
17874 
// FP vector absolute value (fabs), 2 float lanes (T2S).
instruct vabs2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// FP vector absolute value, 4 float lanes (T4S).
instruct vabs4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// FP vector absolute value, 2 double lanes (T2D).
instruct vabs2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
17913 
17914 // --------------------------------- NEG --------------------------------------
17915 
// FP vector negate (fneg), 2 float lanes (T2S).
instruct vneg2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// FP vector negate, 4 float lanes (T4S).
instruct vneg4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// FP vector negate, 2 double lanes (T2D).
instruct vneg2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
17954 
17955 // --------------------------------- AND --------------------------------------
17956 
// Bitwise AND. Lane type is irrelevant for logical ops, so the
// predicates test length_in_bytes rather than lane count.

// 4 or 8 bytes of payload in a 64-bit register (T8B).
instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// 16 bytes of payload (T16B).
instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17985 
17986 // --------------------------------- OR ---------------------------------------
17987 
// Bitwise OR, 4 or 8 bytes of payload in a 64-bit register (T8B).
// Fix: the format string previously printed "and" although the
// encoding emits orr, producing misleading PrintOptoAssembly output.
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src1$$reg),
           as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
18002 
// Bitwise OR, 16 bytes of payload (T16B).
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
18016 
18017 // --------------------------------- XOR --------------------------------------
18018 
// Bitwise XOR (NEON eor), 4 or 8 bytes of payload (T8B).
instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// Bitwise XOR, 16 bytes of payload (T16B).
instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
18047 
18048 // ------------------------------ Shift ---------------------------------------
// Replicate a scalar shift count into every byte lane of a vector
// (dup). Byte granularity is used for all element sizes; the variable
// shift instructions below consume the count from each lane.
instruct vshiftcnt8B(vecD dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// 128-bit variant of the shift-count broadcast.
instruct vshiftcnt16B(vecX dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
18070 
// Vector left shift by vector count (sshl), 4 or 8 byte lanes (T8B).
instruct vsll8B(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Vector left shift by vector count, 16 byte lanes (T16B).
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
18097 
18098 // Right shifts with vector shift count on aarch64 SIMD are implemented
18099 // as left shift by negative shift count.
18100 // There are two cases for vector shift count.
18101 //
18102 // Case 1: The vector shift count is from replication.
18103 //        |            |
18104 //    LoadVector  RShiftCntV
18105 //        |       /
18106 //     RShiftVI
18107 // Note: In inner loop, multiple neg instructions are used, which can be
18108 // moved to outer loop and merge into one neg instruction.
18109 //
18110 // Case 2: The vector shift count is from loading.
18111 // This case isn't supported by middle-end now. But it's supported by
18112 // panama/vectorIntrinsics(JEP 338: Vector API).
18113 //        |            |
18114 //    LoadVector  LoadVector
18115 //        |       /
18116 //     RShiftVI
18117 //
18118 
// Arithmetic right shift by vector count: negate the count, then use
// sshl (see the comment above — AArch64 SIMD has no right shift by
// vector register). 4 or 8 byte lanes (T8B).
instruct vsra8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// Arithmetic right shift by vector count, 16 byte lanes (T16B).
instruct vsra16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

// Logical right shift by vector count: negated count fed to ushl.
// 4 or 8 byte lanes (T8B).
instruct vsrl8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// Logical right shift by vector count, 16 byte lanes (T16B).
instruct vsrl16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
18188 
// Byte shifts by an immediate count. For shl/ushr, a count >= 8 must
// yield all-zero lanes (Java shift semantics for sub-word vectors),
// which is produced by eor'ing src with itself since the hardware
// immediate field cannot encode a shift of 8 or more.

// Left shift by immediate, 4 or 8 byte lanes (T8B).
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Shift count saturates: result is zero.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Left shift by immediate, 16 byte lanes (T16B).
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Shift count saturates: result is zero.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

// Arithmetic right shift by immediate, 4 or 8 byte lanes (T8B).
// Counts >= 8 are clamped to 7: every result bit is the sign bit.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

// Arithmetic right shift by immediate, 16 byte lanes (T16B).
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}

// Logical right shift by immediate, 4 or 8 byte lanes (T8B).
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Shift count saturates: result is zero.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Logical right shift by immediate, 16 byte lanes (T16B).
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Shift count saturates: result is zero.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
18295 
18296 instruct vsll4S(vecD dst, vecD src, vecD shift) %{
18297   predicate(n->as_Vector()->length() == 2 ||
18298             n->as_Vector()->length() == 4);
18299   match(Set dst (LShiftVS src shift));
18300   ins_cost(INSN_COST);
18301   format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
18302   ins_encode %{
18303     __ sshl(as_FloatRegister($dst$$reg), __ T4H,
18304             as_FloatRegister($src$$reg),
18305             as_FloatRegister($shift$$reg));
18306   %}
18307   ins_pipe(vshift64);
18308 %}
18309 
// Vector left shift, halfword lanes, per-lane register counts — 128-bit
// (8H) variant of vsll4S.
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
18322 
// Vector arithmetic right shift, halfword lanes, per-lane register counts.
// sshl shifts right when the per-lane count is negative, so the counts are
// negated into a temp first.
instruct vsra4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    // Byte-lane negation of the counts; NOTE(review): relies on the shift
    // amount being taken from the low byte of each lane — confirm vs. ISA.
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
18340 
// Vector arithmetic right shift, halfword lanes, per-lane register counts
// — 128-bit (8H) variant of vsra4S (negate counts, then sshl).
instruct vsra8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
18357 
// Vector logical right shift, halfword lanes, per-lane register counts.
// ushl shifts right for negative counts, so the counts are negated first.
instruct vsrl4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
18375 
// Vector logical right shift, halfword lanes, per-lane register counts —
// 128-bit (8H) variant of vsrl4S (negate counts, then ushl).
instruct vsrl8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
18392 
// Vector left shift, halfword lanes, immediate count (64-bit vector).
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Count >= the 16-bit lane width cannot be encoded in shl; the
      // result is all-zero, produced by eor-ing the source with itself.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
18412 
// Vector left shift, halfword lanes, immediate count — 128-bit (8H)
// variant of vsll4S_imm.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Count >= lane width: result is zero (eor with self).
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
18431 
// Vector arithmetic right shift, halfword lanes, immediate count.
// Unlike the logical case, an over-wide count is clamped to 15: shifting
// in sign bits saturates at lane_width - 1.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
18446 
// Vector arithmetic right shift, halfword lanes, immediate count —
// 128-bit (8H) variant of vsra4S_imm (count clamped to 15).
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
18460 
// Vector logical right shift, halfword lanes, immediate count (64-bit
// vector).
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Count >= lane width: logical shift result is zero (eor with self).
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
18480 
// Vector logical right shift, halfword lanes, immediate count — 128-bit
// (8H) variant of vsrl4S_imm.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Count >= lane width: logical shift result is zero (eor with self).
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
18499 
// Vector left shift, word lanes, per-lane register counts (2S, 64-bit).
instruct vsll2I(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
18512 
// Vector left shift, word lanes, per-lane register counts (4S, 128-bit).
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
18525 
// Vector arithmetic right shift, word lanes, per-lane register counts.
// Counts are negated so sshl performs a right shift (negative counts).
instruct vsra2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
18542 
// Vector arithmetic right shift, word lanes, per-lane register counts —
// 128-bit (4S) variant of vsra2I (negate counts, then sshl).
instruct vsra4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
18559 
// Vector logical right shift, word lanes, per-lane register counts.
// Counts are negated so ushl performs a right shift (negative counts).
instruct vsrl2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
18576 
// Vector logical right shift, word lanes, per-lane register counts —
// 128-bit (4S) variant of vsrl2I (negate counts, then ushl).
instruct vsrl4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
18593 
// Vector left shift, word lanes, immediate count (2S). No range fix-up
// is emitted here; the constant is passed to shl as-is.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
18606 
// Vector left shift, word lanes, immediate count (4S, 128-bit).
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18619 
// Vector arithmetic right shift, word lanes, immediate count (2S).
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
18632 
// Vector arithmetic right shift, word lanes, immediate count (4S, 128-bit).
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18645 
// Vector logical right shift, word lanes, immediate count (2S).
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
18658 
// Vector logical right shift, word lanes, immediate count (4S, 128-bit).
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18671 
// Vector left shift, doubleword lanes, per-lane register counts (2D).
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
18684 
// Vector arithmetic right shift, doubleword lanes, per-lane register
// counts: negate counts, then sshl (right shift for negative counts).
instruct vsra2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
18701 
// Vector logical right shift, doubleword lanes, per-lane register
// counts: negate counts, then ushl (right shift for negative counts).
instruct vsrl2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
18718 
// Vector left shift, doubleword lanes, immediate count (2D).
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18731 
// Vector arithmetic right shift, doubleword lanes, immediate count (2D).
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18744 
// Vector logical right shift, doubleword lanes, immediate count (2D).
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18757 
// Element-wise maximum of two 2-lane float vectors via fmax (2S).
instruct vmax2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (2F)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
18771 
// Element-wise maximum of two 4-lane float vectors via fmax (4S).
instruct vmax4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
18785 
// Element-wise maximum of two 2-lane double vectors via fmax (2D).
instruct vmax2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
18799 
// Element-wise minimum of two 2-lane float vectors via fmin (2S).
instruct vmin2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (2F)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
18813 
// Element-wise minimum of two 4-lane float vectors via fmin (4S).
instruct vmin4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
18827 
// Element-wise minimum of two 2-lane double vectors via fmin (2D).
instruct vmin2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
18841 
// Round each of two double lanes according to the constant rounding mode:
// frintn (ties-to-even), frintm (toward -inf), frintp (toward +inf).
instruct vround2D_reg(vecX dst, vecX src, immI rmode) %{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (RoundDoubleModeV src rmode));
  format %{ "frint  $dst, $src, $rmode" %}
  ins_encode %{
    // NOTE(review): no default case — an unexpected rmode constant emits
    // nothing; presumably only these three modes can reach here.
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        __ frintn(as_FloatRegister($dst$$reg), __ T2D,
                  as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_floor:
        __ frintm(as_FloatRegister($dst$$reg), __ T2D,
                  as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_ceil:
        __ frintp(as_FloatRegister($dst$$reg), __ T2D,
                  as_FloatRegister($src$$reg));
        break;
    }
  %}
  ins_pipe(vdop_fp128);
%}
18864 
// Population count of four int lanes: cnt gives per-byte bit counts, then
// two pairwise widening adds (uaddlp) accumulate bytes -> halfwords ->
// words.
instruct vpopcount4I(vecX dst, vecX src) %{
  predicate(UsePopCountInstruction && n->as_Vector()->length() == 4);
  match(Set dst (PopCountVI src));
  format %{
    "cnt     $dst, $src\t# vector (16B)\n\t"
    "uaddlp  $dst, $dst\t# vector (16B)\n\t"
    "uaddlp  $dst, $dst\t# vector (8H)"
  %}
  ins_encode %{
     __ cnt(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg));
     __ uaddlp(as_FloatRegister($dst$$reg), __ T16B,
               as_FloatRegister($dst$$reg));
     __ uaddlp(as_FloatRegister($dst$$reg), __ T8H,
               as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
18883 
// Population count of two int lanes — 64-bit variant of vpopcount4I:
// per-byte cnt followed by two pairwise widening adds (uaddlp).
instruct vpopcount2I(vecD dst, vecD src) %{
  predicate(UsePopCountInstruction && n->as_Vector()->length() == 2);
  match(Set dst (PopCountVI src));
  format %{
    "cnt     $dst, $src\t# vector (8B)\n\t"
    "uaddlp  $dst, $dst\t# vector (8B)\n\t"
    "uaddlp  $dst, $dst\t# vector (4H)"
  %}
  ins_encode %{
     __ cnt(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg));
     __ uaddlp(as_FloatRegister($dst$$reg), __ T8B,
               as_FloatRegister($dst$$reg));
     __ uaddlp(as_FloatRegister($dst$$reg), __ T4H,
               as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
18902 
18903 //----------PEEPHOLE RULES-----------------------------------------------------
18904 // These must follow all instruction definitions as they use the names
18905 // defined in the instructions definitions.
18906 //
18907 // peepmatch ( root_instr_name [preceding_instruction]* );
18908 //
18909 // peepconstraint %{
18910 // (instruction_number.operand_name relational_op instruction_number.operand_name
18911 //  [, ...] );
18912 // // instruction numbers are zero-based using left to right order in peepmatch
18913 //
18914 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
18915 // // provide an instruction_number.operand_name for each operand that appears
18916 // // in the replacement instruction's match rule
18917 //
18918 // ---------VM FLAGS---------------------------------------------------------
18919 //
18920 // All peephole optimizations can be turned off using -XX:-OptoPeephole
18921 //
18922 // Each peephole rule is given an identifying number starting with zero and
18923 // increasing by one in the order seen by the parser.  An individual peephole
18924 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
18925 // on the command-line.
18926 //
18927 // ---------CURRENT LIMITATIONS----------------------------------------------
18928 //
18929 // Only match adjacent instructions in same basic block
18930 // Only equality constraints
18931 // Only constraints between operands, not (0.dest_reg == RAX_enc)
18932 // Only one replacement instruction
18933 //
18934 // ---------EXAMPLE----------------------------------------------------------
18935 //
18936 // // pertinent parts of existing instructions in architecture description
18937 // instruct movI(iRegINoSp dst, iRegI src)
18938 // %{
18939 //   match(Set dst (CopyI src));
18940 // %}
18941 //
18942 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
18943 // %{
18944 //   match(Set dst (AddI dst src));
18945 //   effect(KILL cr);
18946 // %}
18947 //
18948 // // Change (inc mov) to lea
18949 // peephole %{
//   // increment preceded by register-register move
18951 //   peepmatch ( incI_iReg movI );
18952 //   // require that the destination register of the increment
18953 //   // match the destination register of the move
18954 //   peepconstraint ( 0.dst == 1.dst );
18955 //   // construct a replacement instruction that sets
18956 //   // the destination to ( move's source register + one )
18957 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
18958 // %}
18959 //
18960 
18961 // Implementation no longer uses movX instructions since
18962 // machine-independent system no longer uses CopyX nodes.
18963 //
18964 // peephole
18965 // %{
18966 //   peepmatch (incI_iReg movI);
18967 //   peepconstraint (0.dst == 1.dst);
18968 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18969 // %}
18970 
18971 // peephole
18972 // %{
18973 //   peepmatch (decI_iReg movI);
18974 //   peepconstraint (0.dst == 1.dst);
18975 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18976 // %}
18977 
18978 // peephole
18979 // %{
18980 //   peepmatch (addI_iReg_imm movI);
18981 //   peepconstraint (0.dst == 1.dst);
18982 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18983 // %}
18984 
18985 // peephole
18986 // %{
18987 //   peepmatch (incL_iReg movL);
18988 //   peepconstraint (0.dst == 1.dst);
18989 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18990 // %}
18991 
18992 // peephole
18993 // %{
18994 //   peepmatch (decL_iReg movL);
18995 //   peepconstraint (0.dst == 1.dst);
18996 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18997 // %}
18998 
18999 // peephole
19000 // %{
19001 //   peepmatch (addL_iReg_imm movL);
19002 //   peepconstraint (0.dst == 1.dst);
19003 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
19004 // %}
19005 
19006 // peephole
19007 // %{
19008 //   peepmatch (addP_iReg_imm movP);
19009 //   peepconstraint (0.dst == 1.dst);
19010 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
19011 // %}
19012 
19013 // // Change load of spilled value to only a spill
19014 // instruct storeI(memory mem, iRegI src)
19015 // %{
19016 //   match(Set mem (StoreI mem src));
19017 // %}
19018 //
19019 // instruct loadI(iRegINoSp dst, memory mem)
19020 // %{
19021 //   match(Set dst (LoadI mem));
19022 // %}
19023 //
19024 
19025 //----------SMARTSPILL RULES---------------------------------------------------
19026 // These must follow all instruction definitions as they use the names
19027 // defined in the instructions definitions.
19028 
19029 // Local Variables:
19030 // mode: c++
19031 // End: