1 //
    2 // Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
    3 // Copyright (c) 2014, 2020, Red Hat, Inc. All rights reserved.
    4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    5 //
    6 // This code is free software; you can redistribute it and/or modify it
    7 // under the terms of the GNU General Public License version 2 only, as
    8 // published by the Free Software Foundation.
    9 //
   10 // This code is distributed in the hope that it will be useful, but WITHOUT
   11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   13 // version 2 for more details (a copy is included in the LICENSE file that
   14 // accompanied this code).
   15 //
   16 // You should have received a copy of the GNU General Public License version
   17 // 2 along with this work; if not, write to the Free Software Foundation,
   18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   19 //
   20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   21 // or visit www.oracle.com if you need additional information or have any
   22 // questions.
   23 //
   24 //
   25 
   26 // AArch64 Architecture Description File
   27 
   28 //----------REGISTER DEFINITION BLOCK------------------------------------------
   29 // This information is used by the matcher and the register allocator to
   30 // describe individual registers and classes of registers within the target
// architecture.
   32 
   33 register %{
   34 //----------Architecture Description Register Definitions----------------------
   35 // General Registers
   36 // "reg_def"  name ( register save type, C convention save type,
   37 //                   ideal register type, encoding );
   38 // Register Save Types:
   39 //
   40 // NS  = No-Save:       The register allocator assumes that these registers
   41 //                      can be used without saving upon entry to the method, &
   42 //                      that they do not need to be saved at call sites.
   43 //
   44 // SOC = Save-On-Call:  The register allocator assumes that these registers
   45 //                      can be used without saving upon entry to the method,
   46 //                      but that they must be saved at call sites.
   47 //
   48 // SOE = Save-On-Entry: The register allocator assumes that these registers
   49 //                      must be saved before using them upon entry to the
   50 //                      method, but they do not need to be saved at call
   51 //                      sites.
   52 //
   53 // AS  = Always-Save:   The register allocator assumes that these registers
   54 //                      must be saved before using them upon entry to the
   55 //                      method, & that they must be saved at call sites.
   56 //
   57 // Ideal Register Type is used to determine how to save & restore a
   58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
   59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
   60 //
   61 // The encoding number is the actual bit-pattern placed into the opcodes.
   62 
   63 // We must define the 64 bit int registers in two 32 bit halves, the
   64 // real lower register and a virtual upper half register. upper halves
   65 // are used by the register allocator but are not actually supplied as
   66 // operands to memory ops.
   67 //
   68 // follow the C1 compiler in making registers
   69 //
   70 //   r0-r7,r10-r26 volatile (caller save)
   71 //   r27-r32 system (no save, no allocate)
   72 //   r8-r9 non-allocatable (so we can use them as scratch regs)
   73 //
   74 // as regards Java usage. we don't use any callee save registers
   75 // because this makes it difficult to de-optimise a frame (see comment
   76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
   77 //
   78 
   79 // General Registers
   80 
   81 reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
   82 reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
   83 reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
   84 reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
   85 reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
   86 reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
   87 reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
   88 reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
   89 reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
   90 reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
   91 reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
   92 reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
   93 reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
   94 reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
   95 reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
   96 reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
   97 reg_def R8      ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()         ); // rscratch1, non-allocatable
   98 reg_def R8_H    ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()->next() );
   99 reg_def R9      ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()         ); // rscratch2, non-allocatable
  100 reg_def R9_H    ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()->next() );
  101 reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
  102 reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
  103 reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
  104 reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
  105 reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
  106 reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
  107 reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
  108 reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
  109 reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
  110 reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
  111 reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
  112 reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
  113 reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
  114 reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
  115 reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
  116 reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
  117 reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
  118 reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
  119 reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
  120 reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
  121 reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
  122 reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
  123 reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
  124 reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
  125 reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
  126 reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
  127 reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
  128 reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
  129 reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
  130 reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
  131 reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
  132 reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
  133 reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
  134 reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
  135 reg_def R27     ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
  136 reg_def R27_H   ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()->next());
  137 reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
  138 reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
  139 reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
  140 reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
  141 reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
  142 reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
  143 reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
  144 reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
  145 
  146 // ----------------------------
  147 // Float/Double/Vector Registers
  148 // ----------------------------
  149 
  150 // Double Registers
  151 
  152 // The rules of ADL require that double registers be defined in pairs.
  153 // Each pair must be two 32-bit values, but not necessarily a pair of
  154 // single float registers. In each pair, ADLC-assigned register numbers
  155 // must be adjacent, with the lower number even. Finally, when the
  156 // CPU stores such a register pair to memory, the word associated with
  157 // the lower ADLC-assigned number must be stored to the lower address.
  158 
  159 // AArch64 has 32 floating-point registers. Each can store a vector of
  160 // single or double precision floating-point values up to 8 * 32
  161 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
  162 // use the first float or double element of the vector.
  163 
// For Java use, float registers v0-v15 are always save-on-call (whereas
// the platform ABI treats v8-v15 as callee save).  Float registers
// v16-v31 are SOC as per the platform spec.
  167 
// For SVE vector registers, we simply extend the vector register size
// to 8 slots.  A vector register using only the lower 4 slots denotes
// a 128-bit NEON vector register, while a vector register using the
// whole 8 slots indicates an SVE scalable vector register with vector
// size >= 128 bits (128 ~ 2048 bits, a multiple of 128 bits).  A
// 128-bit SVE vector register also has 8 slots, but the actual size is
// 128 bits, the same as a NEON vector register.  Since the real SVE
// vector register size can be detected during JIT compilation, the
// register allocator is able to do the right thing with the real
// register size, e.g. for spilling/unspilling.
  178 
  179   reg_def V0   ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()          );
  180   reg_def V0_H ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next()  );
  181   reg_def V0_J ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(2) );
  182   reg_def V0_K ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(3) );
  183   reg_def V0_L ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(4) );
  184   reg_def V0_M ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(5) );
  185   reg_def V0_N ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(6) );
  186   reg_def V0_O ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(7) );
  187 
  188   reg_def V1   ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()          );
  189   reg_def V1_H ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next()  );
  190   reg_def V1_J ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(2) );
  191   reg_def V1_K ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(3) );
  192   reg_def V1_L ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(4) );
  193   reg_def V1_M ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(5) );
  194   reg_def V1_N ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(6) );
  195   reg_def V1_O ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(7) );
  196 
  197   reg_def V2   ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()          );
  198   reg_def V2_H ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next()  );
  199   reg_def V2_J ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(2) );
  200   reg_def V2_K ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(3) );
  201   reg_def V2_L ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(4) );
  202   reg_def V2_M ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(5) );
  203   reg_def V2_N ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(6) );
  204   reg_def V2_O ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(7) );
  205 
  206   reg_def V3   ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()          );
  207   reg_def V3_H ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next()  );
  208   reg_def V3_J ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(2) );
  209   reg_def V3_K ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(3) );
  210   reg_def V3_L ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(4) );
  211   reg_def V3_M ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(5) );
  212   reg_def V3_N ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(6) );
  213   reg_def V3_O ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(7) );
  214 
  215   reg_def V4   ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()          );
  216   reg_def V4_H ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next()  );
  217   reg_def V4_J ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(2) );
  218   reg_def V4_K ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(3) );
  219   reg_def V4_L ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(4) );
  220   reg_def V4_M ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(5) );
  221   reg_def V4_N ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(6) );
  222   reg_def V4_O ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(7) );
  223 
  224   reg_def V5   ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()          );
  225   reg_def V5_H ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next()  );
  226   reg_def V5_J ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(2) );
  227   reg_def V5_K ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(3) );
  228   reg_def V5_L ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(4) );
  229   reg_def V5_M ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(5) );
  230   reg_def V5_N ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(6) );
  231   reg_def V5_O ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(7) );
  232 
  233   reg_def V6   ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()          );
  234   reg_def V6_H ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next()  );
  235   reg_def V6_J ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(2) );
  236   reg_def V6_K ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(3) );
  237   reg_def V6_L ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(4) );
  238   reg_def V6_M ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(5) );
  239   reg_def V6_N ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(6) );
  240   reg_def V6_O ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(7) );
  241 
  242   reg_def V7   ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()          );
  243   reg_def V7_H ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next()  );
  244   reg_def V7_J ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(2) );
  245   reg_def V7_K ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(3) );
  246   reg_def V7_L ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(4) );
  247   reg_def V7_M ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(5) );
  248   reg_def V7_N ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(6) );
  249   reg_def V7_O ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(7) );
  250 
  251   reg_def V8   ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()          );
  252   reg_def V8_H ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next()  );
  253   reg_def V8_J ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(2) );
  254   reg_def V8_K ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(3) );
  255   reg_def V8_L ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(4) );
  256   reg_def V8_M ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(5) );
  257   reg_def V8_N ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(6) );
  258   reg_def V8_O ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(7) );
  259 
  260   reg_def V9   ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()          );
  261   reg_def V9_H ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next()  );
  262   reg_def V9_J ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(2) );
  263   reg_def V9_K ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(3) );
  264   reg_def V9_L ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(4) );
  265   reg_def V9_M ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(5) );
  266   reg_def V9_N ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(6) );
  267   reg_def V9_O ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(7) );
  268 
  269   reg_def V10   ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()          );
  270   reg_def V10_H ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next()  );
  271   reg_def V10_J ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2) );
  272   reg_def V10_K ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3) );
  273   reg_def V10_L ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(4) );
  274   reg_def V10_M ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(5) );
  275   reg_def V10_N ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(6) );
  276   reg_def V10_O ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(7) );
  277 
  278   reg_def V11   ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()          );
  279   reg_def V11_H ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next()  );
  280   reg_def V11_J ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2) );
  281   reg_def V11_K ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3) );
  282   reg_def V11_L ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(4) );
  283   reg_def V11_M ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(5) );
  284   reg_def V11_N ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(6) );
  285   reg_def V11_O ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(7) );
  286 
  287   reg_def V12   ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()          );
  288   reg_def V12_H ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next()  );
  289   reg_def V12_J ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2) );
  290   reg_def V12_K ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3) );
  291   reg_def V12_L ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(4) );
  292   reg_def V12_M ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(5) );
  293   reg_def V12_N ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(6) );
  294   reg_def V12_O ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(7) );
  295 
  296   reg_def V13   ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()          );
  297   reg_def V13_H ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next()  );
  298   reg_def V13_J ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2) );
  299   reg_def V13_K ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3) );
  300   reg_def V13_L ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(4) );
  301   reg_def V13_M ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(5) );
  302   reg_def V13_N ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(6) );
  303   reg_def V13_O ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(7) );
  304 
  305   reg_def V14   ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()          );
  306   reg_def V14_H ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next()  );
  307   reg_def V14_J ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2) );
  308   reg_def V14_K ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3) );
  309   reg_def V14_L ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(4) );
  310   reg_def V14_M ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(5) );
  311   reg_def V14_N ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(6) );
  312   reg_def V14_O ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(7) );
  313 
  314   reg_def V15   ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()          );
  315   reg_def V15_H ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next()  );
  316   reg_def V15_J ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2) );
  317   reg_def V15_K ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3) );
  318   reg_def V15_L ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(4) );
  319   reg_def V15_M ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(5) );
  320   reg_def V15_N ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(6) );
  321   reg_def V15_O ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(7) );
  322 
  323   reg_def V16   ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()          );
  324   reg_def V16_H ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next()  );
  325   reg_def V16_J ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2) );
  326   reg_def V16_K ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3) );
  327   reg_def V16_L ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(4) );
  328   reg_def V16_M ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(5) );
  329   reg_def V16_N ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(6) );
  330   reg_def V16_O ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(7) );
  331 
  332   reg_def V17   ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()          );
  333   reg_def V17_H ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next()  );
  334   reg_def V17_J ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2) );
  335   reg_def V17_K ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3) );
  336   reg_def V17_L ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(4) );
  337   reg_def V17_M ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(5) );
  338   reg_def V17_N ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(6) );
  339   reg_def V17_O ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(7) );
  340 
  341   reg_def V18   ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()          );
  342   reg_def V18_H ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next()  );
  343   reg_def V18_J ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2) );
  344   reg_def V18_K ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3) );
  345   reg_def V18_L ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(4) );
  346   reg_def V18_M ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(5) );
  347   reg_def V18_N ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(6) );
  348   reg_def V18_O ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(7) );
  349 
  350   reg_def V19   ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()          );
  351   reg_def V19_H ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next()  );
  352   reg_def V19_J ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2) );
  353   reg_def V19_K ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3) );
  354   reg_def V19_L ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(4) );
  355   reg_def V19_M ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(5) );
  356   reg_def V19_N ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(6) );
  357   reg_def V19_O ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(7) );
  358 
  359   reg_def V20   ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()          );
  360   reg_def V20_H ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next()  );
  361   reg_def V20_J ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2) );
  362   reg_def V20_K ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3) );
  363   reg_def V20_L ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(4) );
  364   reg_def V20_M ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(5) );
  365   reg_def V20_N ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(6) );
  366   reg_def V20_O ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(7) );
  367 
  368   reg_def V21   ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()          );
  369   reg_def V21_H ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next()  );
  370   reg_def V21_J ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2) );
  371   reg_def V21_K ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3) );
  372   reg_def V21_L ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(4) );
  373   reg_def V21_M ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(5) );
  374   reg_def V21_N ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(6) );
  375   reg_def V21_O ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(7) );
  376 
  377   reg_def V22   ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()          );
  378   reg_def V22_H ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next()  );
  379   reg_def V22_J ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2) );
  380   reg_def V22_K ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3) );
  381   reg_def V22_L ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(4) );
  382   reg_def V22_M ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(5) );
  383   reg_def V22_N ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(6) );
  384   reg_def V22_O ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(7) );
  385 
  386   reg_def V23   ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()          );
  387   reg_def V23_H ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next()  );
  388   reg_def V23_J ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2) );
  389   reg_def V23_K ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3) );
  390   reg_def V23_L ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(4) );
  391   reg_def V23_M ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(5) );
  392   reg_def V23_N ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(6) );
  393   reg_def V23_O ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(7) );
  394 
  395   reg_def V24   ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()          );
  396   reg_def V24_H ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next()  );
  397   reg_def V24_J ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2) );
  398   reg_def V24_K ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3) );
  399   reg_def V24_L ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(4) );
  400   reg_def V24_M ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(5) );
  401   reg_def V24_N ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(6) );
  402   reg_def V24_O ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(7) );
  403 
  404   reg_def V25   ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()          );
  405   reg_def V25_H ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next()  );
  406   reg_def V25_J ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2) );
  407   reg_def V25_K ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3) );
  408   reg_def V25_L ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(4) );
  409   reg_def V25_M ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(5) );
  410   reg_def V25_N ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(6) );
  411   reg_def V25_O ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(7) );
  412 
  413   reg_def V26   ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()          );
  414   reg_def V26_H ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next()  );
  415   reg_def V26_J ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2) );
  416   reg_def V26_K ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3) );
  417   reg_def V26_L ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(4) );
  418   reg_def V26_M ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(5) );
  419   reg_def V26_N ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(6) );
  420   reg_def V26_O ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(7) );
  421 
  422   reg_def V27   ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()          );
  423   reg_def V27_H ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next()  );
  424   reg_def V27_J ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2) );
  425   reg_def V27_K ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3) );
  426   reg_def V27_L ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(4) );
  427   reg_def V27_M ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(5) );
  428   reg_def V27_N ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(6) );
  429   reg_def V27_O ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(7) );
  430 
  431   reg_def V28   ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()          );
  432   reg_def V28_H ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next()  );
  433   reg_def V28_J ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2) );
  434   reg_def V28_K ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3) );
  435   reg_def V28_L ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(4) );
  436   reg_def V28_M ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(5) );
  437   reg_def V28_N ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(6) );
  438   reg_def V28_O ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(7) );
  439 
  440   reg_def V29   ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()          );
  441   reg_def V29_H ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next()  );
  442   reg_def V29_J ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2) );
  443   reg_def V29_K ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3) );
  444   reg_def V29_L ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(4) );
  445   reg_def V29_M ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(5) );
  446   reg_def V29_N ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(6) );
  447   reg_def V29_O ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(7) );
  448 
  449   reg_def V30   ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()          );
  450   reg_def V30_H ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next()  );
  451   reg_def V30_J ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2) );
  452   reg_def V30_K ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3) );
  453   reg_def V30_L ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(4) );
  454   reg_def V30_M ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(5) );
  455   reg_def V30_N ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(6) );
  456   reg_def V30_O ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(7) );
  457 
  458   reg_def V31   ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()          );
  459   reg_def V31_H ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next()  );
  460   reg_def V31_J ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2) );
  461   reg_def V31_K ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3) );
  462   reg_def V31_L ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(4) );
  463   reg_def V31_M ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(5) );
  464   reg_def V31_N ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(6) );
  465   reg_def V31_O ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(7) );
  466 
  467 
  468 // ----------------------------
  469 // SVE Predicate Registers
  470 // ----------------------------
  471   reg_def P0 (SOC, SOC, Op_RegVMask, 0, p0->as_VMReg());
  472   reg_def P1 (SOC, SOC, Op_RegVMask, 1, p1->as_VMReg());
  473   reg_def P2 (SOC, SOC, Op_RegVMask, 2, p2->as_VMReg());
  474   reg_def P3 (SOC, SOC, Op_RegVMask, 3, p3->as_VMReg());
  475   reg_def P4 (SOC, SOC, Op_RegVMask, 4, p4->as_VMReg());
  476   reg_def P5 (SOC, SOC, Op_RegVMask, 5, p5->as_VMReg());
  477   reg_def P6 (SOC, SOC, Op_RegVMask, 6, p6->as_VMReg());
  478   reg_def P7 (SOC, SOC, Op_RegVMask, 7, p7->as_VMReg());
  479   reg_def P8 (SOC, SOC, Op_RegVMask, 8, p8->as_VMReg());
  480   reg_def P9 (SOC, SOC, Op_RegVMask, 9, p9->as_VMReg());
  481   reg_def P10 (SOC, SOC, Op_RegVMask, 10, p10->as_VMReg());
  482   reg_def P11 (SOC, SOC, Op_RegVMask, 11, p11->as_VMReg());
  483   reg_def P12 (SOC, SOC, Op_RegVMask, 12, p12->as_VMReg());
  484   reg_def P13 (SOC, SOC, Op_RegVMask, 13, p13->as_VMReg());
  485   reg_def P14 (SOC, SOC, Op_RegVMask, 14, p14->as_VMReg());
  486   reg_def P15 (SOC, SOC, Op_RegVMask, 15, p15->as_VMReg());
  487 
  488 // ----------------------------
  489 // Special Registers
  490 // ----------------------------
  491 
// The AArch64 CPSR status flag register is not directly accessible as
// an instruction operand.  The FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
  497 
  498 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
  499 
  500 // Specify priority of register selection within phases of register
  501 // allocation.  Highest priority is first.  A useful heuristic is to
  502 // give registers a low priority when they are required by machine
  503 // instructions, like EAX and EDX on I486, and choose no-save registers
  504 // before save-on-call, & save-on-call before save-on-entry.  Registers
  505 // which participate in fixed calling sequences should come last.
  506 // Registers which are used as pairs must fall on an even boundary.
  507 
// chunk0: the 64-bit general purpose registers, listed in allocation
// priority order -- scratch volatiles first, argument registers next,
// callee-saved registers after that, and fixed-role registers last.
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
    R8, R8_H,   // rscratch1
    R9, R9_H,   // rscratch2
);
  550 
// chunk1: the vector/FP registers, in allocation priority order.
// Each register contributes eight 32-bit slots (V<n> through V<n>_O).
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K, V16_L, V16_M, V16_N, V16_O,
    V17, V17_H, V17_J, V17_K, V17_L, V17_M, V17_N, V17_O,
    V18, V18_H, V18_J, V18_K, V18_L, V18_M, V18_N, V18_O,
    V19, V19_H, V19_J, V19_K, V19_L, V19_M, V19_N, V19_O,
    V20, V20_H, V20_J, V20_K, V20_L, V20_M, V20_N, V20_O,
    V21, V21_H, V21_J, V21_K, V21_L, V21_M, V21_N, V21_O,
    V22, V22_H, V22_J, V22_K, V22_L, V22_M, V22_N, V22_O,
    V23, V23_H, V23_J, V23_K, V23_L, V23_M, V23_N, V23_O,
    V24, V24_H, V24_J, V24_K, V24_L, V24_M, V24_N, V24_O,
    V25, V25_H, V25_J, V25_K, V25_L, V25_M, V25_N, V25_O,
    V26, V26_H, V26_J, V26_K, V26_L, V26_M, V26_N, V26_O,
    V27, V27_H, V27_J, V27_K, V27_L, V27_M, V27_N, V27_O,
    V28, V28_H, V28_J, V28_K, V28_L, V28_M, V28_N, V28_O,
    V29, V29_H, V29_J, V29_K, V29_L, V29_M, V29_N, V29_O,
    V30, V30_H, V30_J, V30_K, V30_L, V30_M, V30_N, V30_O,
    V31, V31_H, V31_J, V31_K, V31_L, V31_M, V31_N, V31_O,

    // arg registers
    V0, V0_H, V0_J, V0_K, V0_L, V0_M, V0_N, V0_O,
    V1, V1_H, V1_J, V1_K, V1_L, V1_M, V1_N, V1_O,
    V2, V2_H, V2_J, V2_K, V2_L, V2_M, V2_N, V2_O,
    V3, V3_H, V3_J, V3_K, V3_L, V3_M, V3_N, V3_O,
    V4, V4_H, V4_J, V4_K, V4_L, V4_M, V4_N, V4_O,
    V5, V5_H, V5_J, V5_K, V5_L, V5_M, V5_N, V5_O,
    V6, V6_H, V6_J, V6_K, V6_L, V6_M, V6_N, V6_O,
    V7, V7_H, V7_J, V7_K, V7_L, V7_M, V7_N, V7_O,

    // non-volatiles
    V8, V8_H, V8_J, V8_K, V8_L, V8_M, V8_N, V8_O,
    V9, V9_H, V9_J, V9_K, V9_L, V9_M, V9_N, V9_O,
    V10, V10_H, V10_J, V10_K, V10_L, V10_M, V10_N, V10_O,
    V11, V11_H, V11_J, V11_K, V11_L, V11_M, V11_N, V11_O,
    V12, V12_H, V12_J, V12_K, V12_L, V12_M, V12_N, V12_O,
    V13, V13_H, V13_J, V13_K, V13_L, V13_M, V13_N, V13_O,
    V14, V14_H, V14_J, V14_K, V14_L, V14_M, V14_N, V14_O,
    V15, V15_H, V15_J, V15_K, V15_L, V15_M, V15_N, V15_O,
);
  591 
// chunk2: the SVE predicate registers.
alloc_class chunk2 (
    // p0-p7 are additionally usable as governing predicates
    // (see reg_class gov_pr below)
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    P7,

    P8,
    P9,
    P10,
    P11,
    P12,
    P13,
    P14,
    P15,
);

// chunk3: the condition flags pseudo-register.
alloc_class chunk3(RFLAGS);
  613 
  614 //----------Architecture Description Register Classes--------------------------
  615 // Several register classes are automatically defined based upon information in
  616 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
  621 //
  622 
  623 // Class for all 32 bit general purpose registers
// Class for all 32 bit general purpose registers
// n.b. R8 and R9 (rscratch1/rscratch2, see chunk0) are not included.
reg_class all_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30,
    R31
);
  656 
  657 
// Class for all 32 bit integer registers (excluding SP which
// will never be used as an integer register)
// The mask is computed at startup by reg_mask_init().
reg_class any_reg32 %{
  return _ANY_REG32_mask;
%}

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);

// Singleton class for R31 int register
reg_class int_r31_reg(R31);
  678 
  679 // Class for all 64 bit general purpose registers
// Class for all 64 bit general purpose registers
// n.b. R8 and R9 (rscratch1/rscratch2, see chunk0) are not included.
reg_class all_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);

// Class for all long integer registers (including SP)
// The mask is computed at startup by reg_mask_init().
reg_class any_reg %{
  return _ANY_REG_mask;
%}
  717 
  718 // Class for non-allocatable 32 bit registers
  719 reg_class non_allocatable_reg32(
  720     R28,                        // thread
  721     R30,                        // lr
  722     R31                         // sp
  723 );
  724 
  725 // Class for non-allocatable 64 bit registers
  726 reg_class non_allocatable_reg(
  727     R28, R28_H,                 // thread
  728     R30, R30_H,                 // lr
  729     R31, R31_H                  // sp
  730 );
  731 
  732 // Class for all non-special integer registers
  733 reg_class no_special_reg32 %{
  734   return _NO_SPECIAL_REG32_mask;
  735 %}
  736 
  737 // Class for all non-special long integer registers
  738 reg_class no_special_reg %{
  739   return _NO_SPECIAL_REG_mask;
  740 %}
  741 
  742 // Class for 64 bit register r0
  743 reg_class r0_reg(
  744     R0, R0_H
  745 );
  746 
  747 // Class for 64 bit register r1
  748 reg_class r1_reg(
  749     R1, R1_H
  750 );
  751 
  752 // Class for 64 bit register r2
  753 reg_class r2_reg(
  754     R2, R2_H
  755 );
  756 
  757 // Class for 64 bit register r3
  758 reg_class r3_reg(
  759     R3, R3_H
  760 );
  761 
  762 // Class for 64 bit register r4
  763 reg_class r4_reg(
  764     R4, R4_H
  765 );
  766 
  767 // Class for 64 bit register r5
  768 reg_class r5_reg(
  769     R5, R5_H
  770 );
  771 
  772 // Class for 64 bit register r10
  773 reg_class r10_reg(
  774     R10, R10_H
  775 );
  776 
  777 // Class for 64 bit register r11
  778 reg_class r11_reg(
  779     R11, R11_H
  780 );
  781 
  782 // Class for method register
  783 reg_class method_reg(
  784     R12, R12_H
  785 );
  786 
  787 // Class for heapbase register
  788 reg_class heapbase_reg(
  789     R27, R27_H
  790 );
  791 
  792 // Class for thread register
  793 reg_class thread_reg(
  794     R28, R28_H
  795 );
  796 
  797 // Class for frame pointer register
  798 reg_class fp_reg(
  799     R29, R29_H
  800 );
  801 
  802 // Class for link register
  803 reg_class lr_reg(
  804     R30, R30_H
  805 );
  806 
  807 // Class for long sp register
  808 reg_class sp_reg(
  809   R31, R31_H
  810 );
  811 
  812 // Class for all pointer registers
  813 reg_class ptr_reg %{
  814   return _PTR_REG_mask;
  815 %}
  816 
  817 // Class for all non_special pointer registers
  818 reg_class no_special_ptr_reg %{
  819   return _NO_SPECIAL_PTR_REG_mask;
  820 %}
  821 
  822 // Class for all float registers
  823 reg_class float_reg(
  824     V0,
  825     V1,
  826     V2,
  827     V3,
  828     V4,
  829     V5,
  830     V6,
  831     V7,
  832     V8,
  833     V9,
  834     V10,
  835     V11,
  836     V12,
  837     V13,
  838     V14,
  839     V15,
  840     V16,
  841     V17,
  842     V18,
  843     V19,
  844     V20,
  845     V21,
  846     V22,
  847     V23,
  848     V24,
  849     V25,
  850     V26,
  851     V27,
  852     V28,
  853     V29,
  854     V30,
  855     V31
  856 );
  857 
  858 // Double precision float registers have virtual `high halves' that
  859 // are needed by the allocator.
  860 // Class for all double registers
  861 reg_class double_reg(
  862     V0, V0_H,
  863     V1, V1_H,
  864     V2, V2_H,
  865     V3, V3_H,
  866     V4, V4_H,
  867     V5, V5_H,
  868     V6, V6_H,
  869     V7, V7_H,
  870     V8, V8_H,
  871     V9, V9_H,
  872     V10, V10_H,
  873     V11, V11_H,
  874     V12, V12_H,
  875     V13, V13_H,
  876     V14, V14_H,
  877     V15, V15_H,
  878     V16, V16_H,
  879     V17, V17_H,
  880     V18, V18_H,
  881     V19, V19_H,
  882     V20, V20_H,
  883     V21, V21_H,
  884     V22, V22_H,
  885     V23, V23_H,
  886     V24, V24_H,
  887     V25, V25_H,
  888     V26, V26_H,
  889     V27, V27_H,
  890     V28, V28_H,
  891     V29, V29_H,
  892     V30, V30_H,
  893     V31, V31_H
  894 );
  895 
// Class for all SVE vector registers.
// (all eight 32-bit slots, V<n> through V<n>_O, per register)
reg_class vectora_reg (
    V0, V0_H, V0_J, V0_K, V0_L, V0_M, V0_N, V0_O,
    V1, V1_H, V1_J, V1_K, V1_L, V1_M, V1_N, V1_O,
    V2, V2_H, V2_J, V2_K, V2_L, V2_M, V2_N, V2_O,
    V3, V3_H, V3_J, V3_K, V3_L, V3_M, V3_N, V3_O,
    V4, V4_H, V4_J, V4_K, V4_L, V4_M, V4_N, V4_O,
    V5, V5_H, V5_J, V5_K, V5_L, V5_M, V5_N, V5_O,
    V6, V6_H, V6_J, V6_K, V6_L, V6_M, V6_N, V6_O,
    V7, V7_H, V7_J, V7_K, V7_L, V7_M, V7_N, V7_O,
    V8, V8_H, V8_J, V8_K, V8_L, V8_M, V8_N, V8_O,
    V9, V9_H, V9_J, V9_K, V9_L, V9_M, V9_N, V9_O,
    V10, V10_H, V10_J, V10_K, V10_L, V10_M, V10_N, V10_O,
    V11, V11_H, V11_J, V11_K, V11_L, V11_M, V11_N, V11_O,
    V12, V12_H, V12_J, V12_K, V12_L, V12_M, V12_N, V12_O,
    V13, V13_H, V13_J, V13_K, V13_L, V13_M, V13_N, V13_O,
    V14, V14_H, V14_J, V14_K, V14_L, V14_M, V14_N, V14_O,
    V15, V15_H, V15_J, V15_K, V15_L, V15_M, V15_N, V15_O,
    V16, V16_H, V16_J, V16_K, V16_L, V16_M, V16_N, V16_O,
    V17, V17_H, V17_J, V17_K, V17_L, V17_M, V17_N, V17_O,
    V18, V18_H, V18_J, V18_K, V18_L, V18_M, V18_N, V18_O,
    V19, V19_H, V19_J, V19_K, V19_L, V19_M, V19_N, V19_O,
    V20, V20_H, V20_J, V20_K, V20_L, V20_M, V20_N, V20_O,
    V21, V21_H, V21_J, V21_K, V21_L, V21_M, V21_N, V21_O,
    V22, V22_H, V22_J, V22_K, V22_L, V22_M, V22_N, V22_O,
    V23, V23_H, V23_J, V23_K, V23_L, V23_M, V23_N, V23_O,
    V24, V24_H, V24_J, V24_K, V24_L, V24_M, V24_N, V24_O,
    V25, V25_H, V25_J, V25_K, V25_L, V25_M, V25_N, V25_O,
    V26, V26_H, V26_J, V26_K, V26_L, V26_M, V26_N, V26_O,
    V27, V27_H, V27_J, V27_K, V27_L, V27_M, V27_N, V27_O,
    V28, V28_H, V28_J, V28_K, V28_L, V28_M, V28_N, V28_O,
    V29, V29_H, V29_J, V29_K, V29_L, V29_M, V29_N, V29_O,
    V30, V30_H, V30_J, V30_K, V30_L, V30_M, V30_N, V30_O,
    V31, V31_H, V31_J, V31_K, V31_L, V31_M, V31_N, V31_O,
);
  931 
// Class for all 64bit vector registers
// (two 32-bit slots per register)
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);

// Class for all 128bit vector registers
// (four 32-bit slots per register)
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 1003 
// Singleton classes for the individual vector registers v0..v31.
// Each lists the low two 32-bit slots (V<n>, V<n>_H) of its register.

// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Class for 128 bit register v4
reg_class v4_reg(
    V4, V4_H
);

// Class for 128 bit register v5
reg_class v5_reg(
    V5, V5_H
);

// Class for 128 bit register v6
reg_class v6_reg(
    V6, V6_H
);

// Class for 128 bit register v7
reg_class v7_reg(
    V7, V7_H
);

// Class for 128 bit register v8
reg_class v8_reg(
    V8, V8_H
);

// Class for 128 bit register v9
reg_class v9_reg(
    V9, V9_H
);

// Class for 128 bit register v10
reg_class v10_reg(
    V10, V10_H
);

// Class for 128 bit register v11
reg_class v11_reg(
    V11, V11_H
);

// Class for 128 bit register v12
reg_class v12_reg(
    V12, V12_H
);

// Class for 128 bit register v13
reg_class v13_reg(
    V13, V13_H
);

// Class for 128 bit register v14
reg_class v14_reg(
    V14, V14_H
);

// Class for 128 bit register v15
reg_class v15_reg(
    V15, V15_H
);

// Class for 128 bit register v16
reg_class v16_reg(
    V16, V16_H
);

// Class for 128 bit register v17
reg_class v17_reg(
    V17, V17_H
);

// Class for 128 bit register v18
reg_class v18_reg(
    V18, V18_H
);

// Class for 128 bit register v19
reg_class v19_reg(
    V19, V19_H
);

// Class for 128 bit register v20
reg_class v20_reg(
    V20, V20_H
);

// Class for 128 bit register v21
reg_class v21_reg(
    V21, V21_H
);

// Class for 128 bit register v22
reg_class v22_reg(
    V22, V22_H
);

// Class for 128 bit register v23
reg_class v23_reg(
    V23, V23_H
);

// Class for 128 bit register v24
reg_class v24_reg(
    V24, V24_H
);

// Class for 128 bit register v25
reg_class v25_reg(
    V25, V25_H
);

// Class for 128 bit register v26
reg_class v26_reg(
    V26, V26_H
);

// Class for 128 bit register v27
reg_class v27_reg(
    V27, V27_H
);

// Class for 128 bit register v28
reg_class v28_reg(
    V28, V28_H
);

// Class for 128 bit register v29
reg_class v29_reg(
    V29, V29_H
);

// Class for 128 bit register v30
reg_class v30_reg(
    V30, V30_H
);

// Class for 128 bit register v31
reg_class v31_reg(
    V31, V31_H
);
 1163 
// Class for all SVE predicate registers.
reg_class pr_reg (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    P7,
    P8,
    P9,
    P10,
    P11,
    P12,
    P13,
    P14,
    P15
);

// Class for SVE governing predicate registers, which are used
// to determine the active elements of a predicated instruction.
// (only the low predicates p0-p7 can govern)
reg_class gov_pr (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    P7
);

// Singleton class for condition codes (RFLAGS, defined above)
reg_class int_flags(RFLAGS);
 1199 
 1200 %}
 1201 
 1202 //----------DEFINITION BLOCK---------------------------------------------------
 1203 // Define name --> value mappings to inform the ADLC of an integer valued name
 1204 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 1205 // Format:
 1206 //        int_def  <name>         ( <int_value>, <expression>);
 1207 // Generated Code in ad_<arch>.hpp
 1208 //        #define  <name>   (<expression>)
 1209 //        // value == <int_value>
 1210 // Generated code in ad_<arch>.cpp adlc_verification()
 1211 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 1212 //
 1213 
 1214 // we follow the ppc-aix port in using a simple cost model which ranks
 1215 // register operations as cheap, memory ops as more expensive and
 1216 // branches as most expensive. the first two have a low as well as a
 1217 // normal cost. huge cost appears to be a way of saying don't do
 1218 // something
 1219 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls are ranked twice as expensive as a plain insn.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile memory references are much more expensive still.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 1227 
 1228 
 1229 //----------SOURCE BLOCK-------------------------------------------------------
 1230 // This is a block of C++ code which provides values, functions, and
 1231 // definitions necessary in the rest of the architecture description
 1232 
 1233 source_hpp %{
 1234 
 1235 #include "asm/macroAssembler.hpp"
 1236 #include "gc/shared/cardTable.hpp"
 1237 #include "gc/shared/cardTableBarrierSet.hpp"
 1238 #include "gc/shared/collectedHeap.hpp"
 1239 #include "opto/addnode.hpp"
 1240 #include "opto/convertnode.hpp"
 1241 
 1242 extern RegMask _ANY_REG32_mask;
 1243 extern RegMask _ANY_REG_mask;
 1244 extern RegMask _PTR_REG_mask;
 1245 extern RegMask _NO_SPECIAL_REG32_mask;
 1246 extern RegMask _NO_SPECIAL_REG_mask;
 1247 extern RegMask _NO_SPECIAL_PTR_REG_mask;
 1248 
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  // Always zero: this platform emits no call trampolines.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  // Always zero: this platform emits no call trampolines.
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
 1266 
class HandlerImpl {

 public:

  // Emitters for the exception and deoptimization handler stubs;
  // bodies are defined in the source block of this file.
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // Worst-case size of the exception handler: a single far branch.
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // (reserves four instruction words in total)
    return 4 * NativeInstruction::instruction_size;
  }
};
 1283 
// Platform-dependent extension point for Node flags.
// AArch64 adds no extra flags, so _last_flag simply mirrors Node's.
class Node::PD {
public:
  enum NodeFlags {
    _last_flag = Node::_last_flag
  };
};
 1290 
// returns true if opcode denotes a CompareAndSwapX or related atomic
// node (GetAndSetX/GetAndAddX); see the definition in the source block
 bool is_CAS(int opcode, bool maybe_volatile);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);
 1309 %}
 1310 
 1311 source %{
 1312 
 1313   // Derived RegMask with conditionally allocatable registers
 1314 
  // No AArch64-specific mach node analysis is required.
  void PhaseOutput::pd_perform_mach_node_analysis() {
  }
 1317 
  // Machine nodes need no alignment beyond the 1-byte default.
  int MachNode::pd_alignment_required() const {
    return 1;
  }
 1321 
  // No padding is ever inserted before a machine node on AArch64.
  int MachNode::compute_padding(int current_offset) const {
    return 0;
  }
 1325 
  // Definitions of the register masks declared extern in the
  // source_hpp block; populated at startup by reg_mask_init() below.
  RegMask _ANY_REG32_mask;
  RegMask _ANY_REG_mask;
  RegMask _PTR_REG_mask;
  RegMask _NO_SPECIAL_REG32_mask;
  RegMask _NO_SPECIAL_REG_mask;
  RegMask _NO_SPECIAL_PTR_REG_mask;
 1332 
  void reg_mask_init() {
    // We derive below RegMask(s) from the ones which are auto-generated from
    // adlc register classes to make AArch64 rheapbase (r27) and rfp (r29)
    // registers conditionally reserved.

    // any 32-bit register except SP (r31)
    _ANY_REG32_mask = _ALL_REG32_mask;
    _ANY_REG32_mask.Remove(OptoReg::as_OptoReg(r31_sp->as_VMReg()));

    // any long register, SP included
    _ANY_REG_mask = _ALL_REG_mask;

    _PTR_REG_mask = _ALL_REG_mask;

    // start from all registers, then knock out the non-allocatable set
    _NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
    _NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);

    _NO_SPECIAL_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    _NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    // r27 is not allocatable when compressed oops is on and heapbase is not
    // zero, compressed klass pointers doesn't use r27 after JDK-8234794
    if (UseCompressedOops && (CompressedOops::ptrs_base() != NULL || UseAOT)) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_REG_mask.SUBTRACT(_HEAPBASE_REG_mask);
      _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_HEAPBASE_REG_mask);
    }

    // r29 is not allocatable when PreserveFramePointer is on
    if (PreserveFramePointer) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_REG_mask.SUBTRACT(_FP_REG_mask);
      _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_FP_REG_mask);
    }
  }
 1369 
  // Optimization of volatile gets and puts
 1371   // -------------------------------------
 1372   //
 1373   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
 1374   // use to implement volatile reads and writes. For a volatile read
 1375   // we simply need
 1376   //
 1377   //   ldar<x>
 1378   //
 1379   // and for a volatile write we need
 1380   //
 1381   //   stlr<x>
 1382   //
 1383   // Alternatively, we can implement them by pairing a normal
 1384   // load/store with a memory barrier. For a volatile read we need
 1385   //
 1386   //   ldr<x>
 1387   //   dmb ishld
 1388   //
 1389   // for a volatile write
 1390   //
 1391   //   dmb ish
 1392   //   str<x>
 1393   //   dmb ish
 1394   //
 1395   // We can also use ldaxr and stlxr to implement compare and swap CAS
 1396   // sequences. These are normally translated to an instruction
 1397   // sequence like the following
 1398   //
 1399   //   dmb      ish
 1400   // retry:
 1401   //   ldxr<x>   rval raddr
 1402   //   cmp       rval rold
 1403   //   b.ne done
 1404   //   stlxr<x>  rval, rnew, rold
 1405   //   cbnz      rval retry
 1406   // done:
 1407   //   cset      r0, eq
 1408   //   dmb ishld
 1409   //
 1410   // Note that the exclusive store is already using an stlxr
 1411   // instruction. That is required to ensure visibility to other
 1412   // threads of the exclusive write (assuming it succeeds) before that
 1413   // of any subsequent writes.
 1414   //
 1415   // The following instruction sequence is an improvement on the above
 1416   //
 1417   // retry:
 1418   //   ldaxr<x>  rval raddr
 1419   //   cmp       rval rold
 1420   //   b.ne done
 1421   //   stlxr<x>  rval, rnew, rold
 1422   //   cbnz      rval retry
 1423   // done:
 1424   //   cset      r0, eq
 1425   //
 1426   // We don't need the leading dmb ish since the stlxr guarantees
 1427   // visibility of prior writes in the case that the swap is
 1428   // successful. Crucially we don't have to worry about the case where
 1429   // the swap is not successful since no valid program should be
 1430   // relying on visibility of prior changes by the attempting thread
 1431   // in the case where the CAS fails.
 1432   //
 1433   // Similarly, we don't need the trailing dmb ishld if we substitute
 1434   // an ldaxr instruction since that will provide all the guarantees we
 1435   // require regarding observation of changes made by other threads
 1436   // before any change to the CAS address observed by the load.
 1437   //
 1438   // In order to generate the desired instruction sequence we need to
 1439   // be able to identify specific 'signature' ideal graph node
 1440   // sequences which i) occur as a translation of a volatile reads or
 1441   // writes or CAS operations and ii) do not occur through any other
 1442   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
 1444   // sequences to the desired machine code sequences. Selection of the
 1445   // alternative rules can be implemented by predicates which identify
 1446   // the relevant node sequences.
 1447   //
 1448   // The ideal graph generator translates a volatile read to the node
 1449   // sequence
 1450   //
 1451   //   LoadX[mo_acquire]
 1452   //   MemBarAcquire
 1453   //
 1454   // As a special case when using the compressed oops optimization we
 1455   // may also see this variant
 1456   //
 1457   //   LoadN[mo_acquire]
 1458   //   DecodeN
 1459   //   MemBarAcquire
 1460   //
 1461   // A volatile write is translated to the node sequence
 1462   //
 1463   //   MemBarRelease
 1464   //   StoreX[mo_release] {CardMark}-optional
 1465   //   MemBarVolatile
 1466   //
 1467   // n.b. the above node patterns are generated with a strict
 1468   // 'signature' configuration of input and output dependencies (see
 1469   // the predicates below for exact details). The card mark may be as
 1470   // simple as a few extra nodes or, in a few GC configurations, may
 1471   // include more complex control flow between the leading and
 1472   // trailing memory barriers. However, whatever the card mark
 1473   // configuration these signatures are unique to translated volatile
 1474   // reads/stores -- they will not appear as a result of any other
 1475   // bytecode translation or inlining nor as a consequence of
 1476   // optimizing transforms.
 1477   //
 1478   // We also want to catch inlined unsafe volatile gets and puts and
 1479   // be able to implement them using either ldar<x>/stlr<x> or some
 1480   // combination of ldr<x>/stlr<x> and dmb instructions.
 1481   //
 1482   // Inlined unsafe volatiles puts manifest as a minor variant of the
 1483   // normal volatile put node sequence containing an extra cpuorder
 1484   // membar
 1485   //
 1486   //   MemBarRelease
 1487   //   MemBarCPUOrder
 1488   //   StoreX[mo_release] {CardMark}-optional
 1489   //   MemBarCPUOrder
 1490   //   MemBarVolatile
 1491   //
 1492   // n.b. as an aside, a cpuorder membar is not itself subject to
 1493   // matching and translation by adlc rules.  However, the rule
 1494   // predicates need to detect its presence in order to correctly
 1495   // select the desired adlc rules.
 1496   //
 1497   // Inlined unsafe volatile gets manifest as a slightly different
 1498   // node sequence to a normal volatile get because of the
 1499   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
  // MemBarAcquire, possibly through an optional DecodeN, is still
 1502   // present
 1503   //
 1504   //   MemBarCPUOrder
 1505   //        ||       \\
 1506   //   MemBarCPUOrder LoadX[mo_acquire]
 1507   //        ||            |
 1508   //        ||       {DecodeN} optional
 1509   //        ||       /
 1510   //     MemBarAcquire
 1511   //
 1512   // In this case the acquire membar does not directly depend on the
 1513   // load. However, we can be sure that the load is generated from an
 1514   // inlined unsafe volatile get if we see it dependent on this unique
 1515   // sequence of membar nodes. Similarly, given an acquire membar we
 1516   // can know that it was added because of an inlined unsafe volatile
 1517   // get if it is fed and feeds a cpuorder membar and if its feed
 1518   // membar also feeds an acquiring load.
 1519   //
 1520   // Finally an inlined (Unsafe) CAS operation is translated to the
 1521   // following ideal graph
 1522   //
 1523   //   MemBarRelease
 1524   //   MemBarCPUOrder
 1525   //   CompareAndSwapX {CardMark}-optional
 1526   //   MemBarCPUOrder
 1527   //   MemBarAcquire
 1528   //
 1529   // So, where we can identify these volatile read and write
 1530   // signatures we can choose to plant either of the above two code
 1531   // sequences. For a volatile read we can simply plant a normal
 1532   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
 1533   // also choose to inhibit translation of the MemBarAcquire and
 1534   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
 1535   //
 1536   // When we recognise a volatile store signature we can choose to
 1537   // plant at a dmb ish as a translation for the MemBarRelease, a
 1538   // normal str<x> and then a dmb ish for the MemBarVolatile.
 1539   // Alternatively, we can inhibit translation of the MemBarRelease
 1540   // and MemBarVolatile and instead plant a simple stlr<x>
 1541   // instruction.
 1542   //
 1543   // when we recognise a CAS signature we can choose to plant a dmb
 1544   // ish as a translation for the MemBarRelease, the conventional
 1545   // macro-instruction sequence for the CompareAndSwap node (which
 1546   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
 1547   // Alternatively, we can elide generation of the dmb instructions
 1548   // and plant the alternative CompareAndSwap macro-instruction
 1549   // sequence (which uses ldaxr<x>).
 1550   //
 1551   // Of course, the above only applies when we see these signature
 1552   // configurations. We still want to plant dmb instructions in any
 1553   // other cases where we may see a MemBarAcquire, MemBarRelease or
 1554   // MemBarVolatile. For example, at the end of a constructor which
 1555   // writes final/volatile fields we will see a MemBarRelease
 1556   // instruction and this needs a 'dmb ish' lest we risk the
 1557   // constructed object being visible without making the
 1558   // final/volatile field writes visible.
 1559   //
 1560   // n.b. the translation rules below which rely on detection of the
 1561   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
 1562   // If we see anything other than the signature configurations we
 1563   // always just translate the loads and stores to ldr<x> and str<x>
 1564   // and translate acquire, release and volatile membars to the
 1565   // relevant dmb instructions.
 1566   //
 1567 
  // is_CAS(int opcode, bool maybe_volatile)
  //
  // return true if opcode is one of the possible CompareAndSwapX
  // values otherwise false.
  //
  // The first group (strong CAS and atomic get-and-set/get-and-add
  // nodes) always answers true.  The second group (compare-and-exchange
  // and weak CAS variants) answers maybe_volatile, i.e. the caller
  // chooses whether to treat them as CAS-like (see
  // needs_acquiring_load_exclusive and unnecessary_acquire below).

  bool is_CAS(int opcode, bool maybe_volatile)
  {
    switch(opcode) {
      // We handle these
    case Op_CompareAndSwapI:
    case Op_CompareAndSwapL:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_CompareAndSwapB:
    case Op_CompareAndSwapS:
    case Op_GetAndSetI:
    case Op_GetAndSetL:
    case Op_GetAndSetP:
    case Op_GetAndSetN:
    case Op_GetAndAddI:
    case Op_GetAndAddL:
      return true;
      // CAS-like only when the caller asks for the volatile treatment
    case Op_CompareAndExchangeI:
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeB:
    case Op_CompareAndExchangeS:
    case Op_CompareAndExchangeL:
    case Op_CompareAndExchangeP:
    case Op_WeakCompareAndSwapB:
    case Op_WeakCompareAndSwapS:
    case Op_WeakCompareAndSwapI:
    case Op_WeakCompareAndSwapL:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      return maybe_volatile;
    default:
      return false;
    }
  }
 1613 
 1614   // helper to determine the maximum number of Phi nodes we may need to
 1615   // traverse when searching from a card mark membar for the merge mem
 1616   // feeding a trailing membar or vice versa
 1617 
 1618 // predicates controlling emit of ldr<x>/ldar<x>
 1619 
 1620 bool unnecessary_acquire(const Node *barrier)
 1621 {
 1622   assert(barrier->is_MemBar(), "expecting a membar");
 1623 
 1624   MemBarNode* mb = barrier->as_MemBar();
 1625 
 1626   if (mb->trailing_load()) {
 1627     return true;
 1628   }
 1629 
 1630   if (mb->trailing_load_store()) {
 1631     Node* load_store = mb->in(MemBarNode::Precedent);
 1632     assert(load_store->is_LoadStore(), "unexpected graph shape");
 1633     return is_CAS(load_store->Opcode(), true);
 1634   }
 1635 
 1636   return false;
 1637 }
 1638 
 1639 bool needs_acquiring_load(const Node *n)
 1640 {
 1641   assert(n->is_Load(), "expecting a load");
 1642   LoadNode *ld = n->as_Load();
 1643   return ld->is_acquire();
 1644 }
 1645 
 1646 bool unnecessary_release(const Node *n)
 1647 {
 1648   assert((n->is_MemBar() &&
 1649           n->Opcode() == Op_MemBarRelease),
 1650          "expecting a release membar");
 1651 
 1652   MemBarNode *barrier = n->as_MemBar();
 1653   if (!barrier->leading()) {
 1654     return false;
 1655   } else {
 1656     Node* trailing = barrier->trailing_membar();
 1657     MemBarNode* trailing_mb = trailing->as_MemBar();
 1658     assert(trailing_mb->trailing(), "Not a trailing membar?");
 1659     assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
 1660 
 1661     Node* mem = trailing_mb->in(MemBarNode::Precedent);
 1662     if (mem->is_Store()) {
 1663       assert(mem->as_Store()->is_release(), "");
 1664       assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
 1665       return true;
 1666     } else {
 1667       assert(mem->is_LoadStore(), "");
 1668       assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
 1669       return is_CAS(mem->Opcode(), true);
 1670     }
 1671   }
 1672   return false;
 1673 }
 1674 
// Returns true when this MemBarVolatile trails a releasing store whose
// stlr<x> already provides the required ordering, making the dmb
// redundant.
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  MemBarNode *mbvol = n->as_MemBar();

  // redundant only when the membar trails a store in a recognised
  // volatile-store signature
  bool release = mbvol->trailing_store();
  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
  if (release) {
    // verify the leading/trailing membar pairing is consistent
    Node* leading = mbvol->leading_membar();
    assert(leading->Opcode() == Op_MemBarRelease, "");
    assert(leading->as_MemBar()->leading_store(), "");
    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
  }
#endif

  return release;
}
 1693 
 1694 // predicates controlling emit of str<x>/stlr<x>
 1695 
 1696 bool needs_releasing_store(const Node *n)
 1697 {
 1698   // assert n->is_Store();
 1699   StoreNode *st = n->as_Store();
 1700   return st->trailing_membar() != NULL;
 1701 }
 1702 
 1703 // predicate controlling translation of CAS
 1704 //
 1705 // returns true if CAS needs to use an acquiring load otherwise false
 1706 
 1707 bool needs_acquiring_load_exclusive(const Node *n)
 1708 {
 1709   assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
 1710   LoadStoreNode* ldst = n->as_LoadStore();
 1711   if (is_CAS(n->Opcode(), false)) {
 1712     assert(ldst->trailing_membar() != NULL, "expected trailing membar");
 1713   } else {
 1714     return ldst->trailing_membar() != NULL;
 1715   }
 1716 
 1717   // so we can just return true here
 1718   return true;
 1719 }
 1720 
 1721 #define __ _masm.
 1722 
 1723 // advance declarations for helper functions to convert register
 1724 // indices to register objects
 1725 
 1726 // the ad file has to provide implementations of certain methods
 1727 // expected by the generic code
 1728 //
 1729 // REQUIRED FUNCTIONALITY
 1730 
 1731 //=============================================================================
 1732 
 1733 // !!!!! Special hack to get all types of calls to specify the byte offset
 1734 //       from the start of the call to the point where the return address
 1735 //       will point.
 1736 
 1737 int MachCallStaticJavaNode::ret_addr_offset()
 1738 {
 1739   // call should be a simple bl
 1740   int off = 4;
 1741   return off;
 1742 }
 1743 
 1744 int MachCallDynamicJavaNode::ret_addr_offset()
 1745 {
 1746   return 16; // movz, movk, movk, bl
 1747 }
 1748 
 1749 int MachCallRuntimeNode::ret_addr_offset() {
 1750   // for generated stubs the call will be
 1751   //   far_call(addr)
 1752   // for real runtime callouts it will be six instructions
 1753   // see aarch64_enc_java_to_runtime
 1754   //   adr(rscratch2, retaddr)
 1755   //   lea(rscratch1, RuntimeAddress(addr)
 1756   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
 1757   //   blr(rscratch1)
 1758   CodeBlob *cb = CodeCache::find_blob(_entry_point);
 1759   if (cb) {
 1760     return MacroAssembler::far_branch_size();
 1761   } else {
 1762     return 6 * NativeInstruction::instruction_size;
 1763   }
 1764 }
 1765 
// Indicate if the safepoint node needs the polling page as an input

// the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
// instruction itself. so we cannot plant a mov of the safepoint poll
// address followed by a load. setting this to true means the mov is
// scheduled as a prior instruction. that's better for scheduling
// anyway.

bool SafePointNode::needs_polling_address_input()
{
  return true;
}
 1779 
 1780 //=============================================================================
 1781 
#ifndef PRODUCT
// Print the breakpoint pseudo-instruction (debug builds only).
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif

// Emit a brk #0, which traps to the debugger.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  C2_MacroAssembler _masm(&cbuf);
  __ brk(0);
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  // defer to the generic size computation
  return MachNode::size(ra_);
}
 1796 
 1797 //=============================================================================
 1798 
 1799 #ifndef PRODUCT
 1800   void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
 1801     st->print("nop \t# %d bytes pad for loops and calls", _count);
 1802   }
 1803 #endif
 1804 
 1805   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
 1806     C2_MacroAssembler _masm(&cbuf);
 1807     for (int i = 0; i < _count; i++) {
 1808       __ nop();
 1809     }
 1810   }
 1811 
 1812   uint MachNopNode::size(PhaseRegAlloc*) const {
 1813     return _count * NativeInstruction::instruction_size;
 1814   }
 1815 
 1816 //=============================================================================
 1817 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
 1818 
 1819 int ConstantTable::calculate_table_base_offset() const {
 1820   return 0;  // absolute addressing, no offset
 1821 }
 1822 
 1823 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
 1824 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
 1825   ShouldNotReachHere();
 1826 }
 1827 
 1828 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
 1829   // Empty encoding
 1830 }
 1831 
 1832 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
 1833   return 0;
 1834 }
 1835 
 1836 #ifndef PRODUCT
 1837 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
 1838   st->print("-- \t// MachConstantBaseNode (empty encoding)");
 1839 }
 1840 #endif
 1841 
#ifndef PRODUCT
// Pretty-print the method prolog (debug builds only); mirrors the code
// sequence generated by MachPrologNode::emit.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  if (C->output()->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  if (framesize < ((1 << 9) + 2 * wordSize)) {
    // small frame: the offset fits the stp immediate field
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    // large frame: save the pair first, then drop sp via rscratch1
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
  if (C->stub_function() == NULL && BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) {
    // nmethod entry barrier check sequence
    st->print("\n\t");
    st->print("ldr  rscratch1, [guard]\n\t");
    st->print("dmb ishld\n\t");
    st->print("ldr  rscratch2, [rthread, #thread_disarmed_offset]\n\t");
    st->print("cmp  rscratch1, rscratch2\n\t");
    st->print("b.eq skip");
    st->print("\n\t");
    st->print("blr #nmethod_entry_barrier_stub\n\t");
    st->print("b skip\n\t");
    st->print("guard: int\n\t");
    st->print("\n\t");
    st->print("skip:\n\t");
  }
}
#endif
 1877 
// Emit the method prolog: patchable nop, optional clinit barrier,
// optional stack bang, frame build, optional nmethod entry barrier.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const int framesize = C->output()->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  if (C->clinit_barrier_on_entry()) {
    assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");

    Label L_skip_barrier;

    // if the holder class is not yet initialized, divert to the
    // wrong-method stub so the call is re-resolved
    __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
    __ bind(L_skip_barrier);
  }

  int bangsize = C->output()->bang_size_in_bytes();
  if (C->output()->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (C->stub_function() == NULL) {
    // method compilation: plant the nmethod entry barrier
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->nmethod_entry_barrier(&_masm);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->output()->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    ConstantTable& constant_table = C->output()->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
 1925 
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

int MachPrologNode::reloc() const
{
  // number of relocatable values contained in this instruction - none
  return 0;
}
 1936 
 1937 //=============================================================================
 1938 
 1939 #ifndef PRODUCT
 1940 void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1941   Compile* C = ra_->C;
 1942   int framesize = C->output()->frame_slots() << LogBytesPerInt;
 1943 
 1944   st->print("# pop frame %d\n\t",framesize);
 1945 
 1946   if (framesize == 0) {
 1947     st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
 1948   } else if (framesize < ((1 << 9) + 2 * wordSize)) {
 1949     st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
 1950     st->print("add  sp, sp, #%d\n\t", framesize);
 1951   } else {
 1952     st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
 1953     st->print("add  sp, sp, rscratch1\n\t");
 1954     st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
 1955   }
 1956 
 1957   if (do_polling() && C->is_method_compilation()) {
 1958     st->print("# touch polling page\n\t");
 1959     st->print("ldr rscratch1, [rthread],#polling_page_offset\n\t");
 1960     st->print("ldr zr, [rscratch1]");
 1961   }
 1962 }
 1963 #endif
 1964 
// Emit the method epilog: tear down the frame, optionally check the
// reserved stack region, and poll for a safepoint on return.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    // safepoint poll on method return
    __ fetch_and_read_polling_page(rscratch1, relocInfo::poll_return_type);
  }
}

uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
 1994 
 1995 //=============================================================================
 1996 
 1997 // Figure out which register class each belongs in: rc_int, rc_float or
 1998 // rc_stack.
 1999 enum RC { rc_bad, rc_int, rc_float, rc_predicate, rc_stack };
 2000 
 2001 static enum RC rc_class(OptoReg::Name reg) {
 2002 
 2003   if (reg == OptoReg::Bad) {
 2004     return rc_bad;
 2005   }
 2006 
 2007   // we have 32 int registers * 2 halves
 2008   int slots_of_int_registers = RegisterImpl::max_slots_per_register * RegisterImpl::number_of_registers;
 2009 
 2010   if (reg < slots_of_int_registers) {
 2011     return rc_int;
 2012   }
 2013 
 2014   // we have 32 float register * 8 halves
 2015   int slots_of_float_registers = FloatRegisterImpl::max_slots_per_register * FloatRegisterImpl::number_of_registers;
 2016   if (reg < slots_of_int_registers + slots_of_float_registers) {
 2017     return rc_float;
 2018   }
 2019 
 2020   int slots_of_predicate_registers = PRegisterImpl::max_slots_per_register * PRegisterImpl::number_of_registers;
 2021   if (reg < slots_of_int_registers + slots_of_float_registers + slots_of_predicate_registers) {
 2022     return rc_predicate;
 2023   }
 2024 
 2025   // Between predicate regs & stack is the flags.
 2026   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
 2027 
 2028   return rc_stack;
 2029 }
 2030 
 2031 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
 2032   Compile* C = ra_->C;
 2033 
 2034   // Get registers to move.
 2035   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
 2036   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
 2037   OptoReg::Name dst_hi = ra_->get_reg_second(this);
 2038   OptoReg::Name dst_lo = ra_->get_reg_first(this);
 2039 
 2040   enum RC src_hi_rc = rc_class(src_hi);
 2041   enum RC src_lo_rc = rc_class(src_lo);
 2042   enum RC dst_hi_rc = rc_class(dst_hi);
 2043   enum RC dst_lo_rc = rc_class(dst_lo);
 2044 
 2045   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
 2046 
 2047   if (src_hi != OptoReg::Bad) {
 2048     assert((src_lo&1)==0 && src_lo+1==src_hi &&
 2049            (dst_lo&1)==0 && dst_lo+1==dst_hi,
 2050            "expected aligned-adjacent pairs");
 2051   }
 2052 
 2053   if (src_lo == dst_lo && src_hi == dst_hi) {
 2054     return 0;            // Self copy, no move.
 2055   }
 2056 
 2057   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
 2058               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
 2059   int src_offset = ra_->reg2offset(src_lo);
 2060   int dst_offset = ra_->reg2offset(dst_lo);
 2061 
 2062   if (bottom_type()->isa_vect() != NULL) {
 2063     uint ireg = ideal_reg();
 2064     if (ireg == Op_VecA && cbuf) {
 2065       Unimplemented();
 2066     } else if (cbuf) {
 2067       assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
 2068       C2_MacroAssembler _masm(cbuf);
 2069       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
 2070       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
 2071         // stack->stack
 2072         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
 2073         if (ireg == Op_VecD) {
 2074           __ unspill(rscratch1, true, src_offset);
 2075           __ spill(rscratch1, true, dst_offset);
 2076         } else {
 2077           __ spill_copy128(src_offset, dst_offset);
 2078         }
 2079       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
 2080         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2081                ireg == Op_VecD ? __ T8B : __ T16B,
 2082                as_FloatRegister(Matcher::_regEncode[src_lo]));
 2083       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
 2084         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
 2085                  ireg == Op_VecD ? __ D : __ Q,
 2086                  ra_->reg2offset(dst_lo));
 2087       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
 2088         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2089                    ireg == Op_VecD ? __ D : __ Q,
 2090                    ra_->reg2offset(src_lo));
 2091       } else {
 2092         ShouldNotReachHere();
 2093       }
 2094     }
 2095   } else if (cbuf) {
 2096     C2_MacroAssembler _masm(cbuf);
 2097     switch (src_lo_rc) {
 2098     case rc_int:
 2099       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
 2100         if (is64) {
 2101             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
 2102                    as_Register(Matcher::_regEncode[src_lo]));
 2103         } else {
 2104             C2_MacroAssembler _masm(cbuf);
 2105             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
 2106                     as_Register(Matcher::_regEncode[src_lo]));
 2107         }
 2108       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
 2109         if (is64) {
 2110             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2111                      as_Register(Matcher::_regEncode[src_lo]));
 2112         } else {
 2113             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2114                      as_Register(Matcher::_regEncode[src_lo]));
 2115         }
 2116       } else {                    // gpr --> stack spill
 2117         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 2118         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
 2119       }
 2120       break;
 2121     case rc_float:
 2122       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
 2123         if (is64) {
 2124             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
 2125                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2126         } else {
 2127             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
 2128                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2129         }
 2130       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
 2131           if (cbuf) {
 2132             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2133                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2134         } else {
 2135             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2136                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2137         }
 2138       } else {                    // fpr --> stack spill
 2139         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 2140         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
 2141                  is64 ? __ D : __ S, dst_offset);
 2142       }
 2143       break;
 2144     case rc_stack:
 2145       if (dst_lo_rc == rc_int) {  // stack --> gpr load
 2146         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
 2147       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
 2148         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2149                    is64 ? __ D : __ S, src_offset);
 2150       } else {                    // stack --> stack copy
 2151         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 2152         __ unspill(rscratch1, is64, src_offset);
 2153         __ spill(rscratch1, is64, dst_offset);
 2154       }
 2155       break;
 2156     default:
 2157       assert(false, "bad rc_class for spill");
 2158       ShouldNotReachHere();
 2159     }
 2160   }
 2161 
 2162   if (st) {
 2163     st->print("spill ");
 2164     if (src_lo_rc == rc_stack) {
 2165       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
 2166     } else {
 2167       st->print("%s -> ", Matcher::regName[src_lo]);
 2168     }
 2169     if (dst_lo_rc == rc_stack) {
 2170       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
 2171     } else {
 2172       st->print("%s", Matcher::regName[dst_lo]);
 2173     }
 2174     if (bottom_type()->isa_vect() != NULL) {
 2175       int vsize = 0;
 2176       switch (ideal_reg()) {
 2177       case Op_VecD:
 2178         vsize = 64;
 2179         break;
 2180       case Op_VecX:
 2181         vsize = 128;
 2182         break;
 2183       case Op_VecA:
 2184         vsize = Matcher::scalable_vector_reg_size(T_BYTE) * 8;
 2185         break;
 2186       default:
 2187         assert(false, "bad register type for spill");
 2188         ShouldNotReachHere();
 2189       }
 2190       st->print("\t# vector spill size = %d", vsize);
 2191     } else {
 2192       st->print("\t# spill size = %d", is64 ? 64 : 32);
 2193     }
 2194   }
 2195 
 2196   return 0;
 2197 
 2198 }
 2199 
#ifndef PRODUCT
// Debug printing delegates to implementation() with no code buffer.
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(NULL, ra_, false, st);
}
#endif

// Code emission delegates to implementation() with no output stream.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}

uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 2216 
 2217 //=============================================================================
 2218 
 2219 #ifndef PRODUCT
 2220 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 2221   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 2222   int reg = ra_->get_reg_first(this);
 2223   st->print("add %s, rsp, #%d]\t# box lock",
 2224             Matcher::regName[reg], offset);
 2225 }
 2226 #endif
 2227 
 2228 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
 2229   C2_MacroAssembler _masm(&cbuf);
 2230 
 2231   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 2232   int reg    = ra_->get_encode(this);
 2233 
 2234   // This add will handle any 24-bit signed offset. 24 bits allows an
 2235   // 8 megabyte stack frame.
 2236   __ add(as_Register(reg), sp, offset);
 2237 }
 2238 
 2239 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
 2240   // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
 2241   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 2242 
 2243   if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
 2244     return NativeInstruction::instruction_size;
 2245   } else {
 2246     return 2 * NativeInstruction::instruction_size;
 2247   }
 2248 }
 2249 
 2250 //=============================================================================
 2251 
 2252 #ifndef PRODUCT
 2253 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 2254 {
 2255   st->print_cr("# MachUEPNode");
 2256   if (UseCompressedClassPointers) {
 2257     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2258     if (CompressedKlassPointers::shift() != 0) {
 2259       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
 2260     }
 2261   } else {
 2262    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2263   }
 2264   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
 2265   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
 2266 }
 2267 #endif
 2268 
 2269 void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
 2270 {
 2271   // This is the unverified entry point.
 2272   C2_MacroAssembler _masm(&cbuf);
 2273 
 2274   __ cmp_klass(j_rarg0, rscratch2, rscratch1);
 2275   Label skip;
 2276   // TODO
 2277   // can we avoid this skip and still use a reloc?
 2278   __ br(Assembler::EQ, skip);
 2279   __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 2280   __ bind(skip);
 2281 }
 2282 
 2283 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
 2284 {
 2285   return MachNode::size(ra_);
 2286 }
 2287 
// REQUIRED EMIT CODE

//=============================================================================

// Emit exception handler code.
// Plants a stub that far-jumps to the shared exception blob; returns
// the stub's offset, or 0 if the code cache is full.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  C2_MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
 2311 
// Emit deopt handler code.
// Plants a stub that records its own address in lr and then far-jumps
// to the deopt blob's unpack entry; returns the stub's offset, or 0 if
// the code cache is full.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  C2_MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // set lr to the address of this instruction before jumping
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
 2332 
 2333 // REQUIRED MATCHER CODE
 2334 
 2335 //=============================================================================
 2336 
 2337 const bool Matcher::match_rule_supported(int opcode) {
 2338   if (!has_match_rule(opcode))
 2339     return false;
 2340 
 2341   bool ret_value = true;
 2342   switch (opcode) {
 2343     case Op_CacheWB:
 2344     case Op_CacheWBPreSync:
 2345     case Op_CacheWBPostSync:
 2346       if (!VM_Version::supports_data_cache_line_flush()) {
 2347         ret_value = false;
 2348       }
 2349       break;
 2350   }
 2351 
 2352   return ret_value; // Per default match rules are supported.
 2353 }
 2354 
 2355 // Identify extra cases that we might want to provide match rules for vector nodes and
 2356 // other intrinsics guarded with vector length (vlen) and element type (bt).
 2357 const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
 2358   if (!match_rule_supported(opcode)) {
 2359     return false;
 2360   }
 2361 
 2362   // Special cases which require vector length
 2363   switch (opcode) {
 2364     case Op_MulAddVS2VI: {
 2365       if (vlen != 4) {
 2366         return false;
 2367       }
 2368       break;
 2369     }
 2370   }
 2371 
 2372   return true; // Per default match rules are supported.
 2373 }
 2374 
// Predicated (masked) vector operations are available with SVE only.
const bool Matcher::has_predicated_vectors(void) {
  return UseSVE > 0;
}

const int Matcher::float_pressure(int default_pressure_threshold) {
  // no platform-specific adjustment of the float pressure threshold
  return default_pressure_threshold;
}

int Matcher::regnum_to_fpu_offset(int regnum)
{
  // aborts if ever called
  Unimplemented();
  return 0;
}
 2388 
 2389 // Is this branch offset short enough that a short branch can be used?
 2390 //
 2391 // NOTE: If the platform does not provide any short branch variants, then
 2392 //       this method should return false for offset 0.
 2393 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
 2394   // The passed offset is relative to address of the branch.
 2395 
 2396   return (-32768 <= offset && offset < 32768);
 2397 }
 2398 
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?
  // Probably always true, even if a temp register is required.
  return true;
}

// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
 2409 
 2410 // Vector width in bytes.
 2411 const int Matcher::vector_width_in_bytes(BasicType bt) {
 2412   int size = MIN2(16, (int)MaxVectorSize);
 2413   // Minimum 2 values in vector
 2414   if (size < 2*type2aelembytes(bt)) size = 0;
 2415   // But never < 4
 2416   if (size < 4) size = 0;
 2417   return size;
 2418 }
 2419 
// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
// Minimum number of elements in a vector of element type bt.
const int Matcher::min_vector_size(const BasicType bt) {
  int max_size = max_vector_size(bt);
  if ((UseSVE > 0) && (MaxVectorSize >= 16)) {
    // Currently vector length less than SVE vector register size is not supported.
    return max_size;
  } else {
    //  For the moment limit the vector size to 8 bytes with NEON.
    int size = 8 / type2aelembytes(bt);
    if (size < 2) size = 2;
    return size;
  }
}
 2436 
const bool Matcher::supports_scalable_vector() {
  // Scalable (length-agnostic) vectors require SVE.
  return UseSVE > 0;
}

// Actual max scalable vector register length.
const int Matcher::scalable_vector_reg_size(const BasicType bt) {
  // Expressed in elements of type bt; same limit as max_vector_size.
  return Matcher::max_vector_size(bt);
}
 2445 
 2446 // Vector ideal reg.
 2447 const uint Matcher::vector_ideal_reg(int len) {
 2448   if (UseSVE > 0 && 16 <= len && len <= 256) {
 2449     return Op_VecA;
 2450   }
 2451   switch(len) {
 2452     case  8: return Op_VecD;
 2453     case 16: return Op_VecX;
 2454   }
 2455   ShouldNotReachHere();
 2456   return 0;
 2457 }
 2458 
// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  // The expanded key alone is sufficient; no original key is passed.
  return false;
}

// aarch64 supports misaligned vectors store/load.
const bool Matcher::misaligned_vectors_ok() {
  return true;
}
 2468 
// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Use conditional move (CMOVL)
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}

const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;

// No support for generic vector operands.
const bool Matcher::supports_generic_vector_operands  = false;

// Must never be called: generic vector operands are not supported.
MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg, bool is_temp) {
  ShouldNotReachHere(); // generic vector operands not supported
  return NULL;
}

// Must never be called: generic vector operands are not supported.
bool Matcher::is_generic_reg2reg_move(MachNode* m) {
  ShouldNotReachHere();  // generic vector operands not supported
  return false;
}

// Must never be called: generic vector operands are not supported.
bool Matcher::is_generic_vector(MachOper* opnd)  {
  ShouldNotReachHere();  // generic vector operands not supported
  return false;
}
 2507 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Only fold a narrow-oop decode into an address when no shift is needed.
  return CompressedOops::shift() == 0;
}

bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}

bool Matcher::const_oop_prefer_decode() {
  // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
  return CompressedOops::base() == NULL;
}

bool Matcher::const_klass_prefer_decode() {
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
  return CompressedKlassPointers::base() == NULL;
}
 2537 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;

// Not used on AArch64; must never be called (aborts via Unimplemented()).
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}

// Advertise here if the CPU requires explicit rounding operations to implement strictfp mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return false; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
 2568 
// Return whether or not this register is ever used as an argument.
// This function is used on startup to build the trampoline stubs in
// generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers not be
// available to the callee.
bool Matcher::can_be_java_arg(int reg)
{
  // Java arguments are passed in r0-r7 (integer/pointer) and v0-v7
  // (floating point); both halves of each 64-bit register are listed.
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}

bool Matcher::is_spillable_arg(int reg)
{
  // Any register that can carry a Java argument may be spilled.
  return can_be_java_arg(reg);
}
 2599 
// Do not use hand-coded assembler for long division by a constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}

// Register for DIVI projection of divmodI.
// Must never be called on AArch64.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
// Must never be called on AArch64.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
// Must never be called on AArch64.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
// Must never be called on AArch64.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// SP is preserved across method handle invokes via the frame pointer.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
 2630 
 2631 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
 2632   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
 2633     Node* u = addp->fast_out(i);
 2634     if (u->is_Mem()) {
 2635       int opsize = u->as_Mem()->memory_size();
 2636       assert(opsize > 0, "unexpected memory operand size");
 2637       if (u->as_Mem()->memory_size() != (1<<shift)) {
 2638         return false;
 2639       }
 2640     }
 2641   }
 2642   return true;
 2643 }
 2644 
// Does the matcher require the type of a ConvI2L input to be fixed?  Not here.
const bool Matcher::convi2l_type_required = false;
 2646 
// Should the matcher clone input 'm' of node 'n'?
bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
  // Clone a constant vector-shift count so it stays with its shift node.
  if (is_vshift_con_pattern(n, m)) { // ShiftV src (ShiftCntV con)
    mstack.push(m, Visit);           // m = ShiftCntV
    return true;
  }
  return false;
}
 2655 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  // Offset is (idx << con): clone the shift (and the ConvI2L feeding
  // it, if any) so it can be folded into a scaled addressing mode,
  // provided the implied scale matches every memory use of this address.
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  // Offset is (ConvI2L idx): clone the conversion so it can be folded
  // into a sign-extending addressing mode.
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}

// Nothing to reshape on AArch64.
void Compile::reshape_address(AddPNode* addp) {
}
 2699 
 2700 
// Emit instruction INSN for a volatile access of REG.  Volatile
// accesses only support a plain base-register address, so any
// index/scale/displacement in the memory operand is rejected with a
// guarantee() failure.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  C2_MacroAssembler _masm(&cbuf);                                       \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
 2709 
 2710 
// Build an Address from the components of a decoded memory operand.
// The operand's opcode tells us whether the index register holds an
// int converted to long (and so must be sign-extended with sxtw) or
// can simply be scaled with lsl.
static Address mem2address(int opcode, Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
    case INDINDEXI2L:
    case INDINDEXI2LN:
      // Index was a 32-bit value converted to long: sign-extend it.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // No index register: base + displacement addressing.
      return Address(base, disp);
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      return Address(base, as_Register(index), scale);
    }
  }


// Member-function-pointer types for the MacroAssembler load/store
// emitters invoked by the loadStore helpers.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_insn2)(Register Rt, Register adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
 2743 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  //
  // Emits 'insn' on integer register 'reg' with the address decoded
  // from the memory operand components.  For base+offset addresses,
  // out-of-range offsets are legitimized through rscratch1.
  static void loadStore(C2_MacroAssembler masm, mem_insn insn,
                        Register reg, int opcode,
                        Register base, int index, int scale, int disp,
                        int size_in_memory)
  {
    Address addr = mem2address(opcode, base, index, scale, disp);
    if (addr.getMode() == Address::base_plus_offset) {
      /* If we get an out-of-range offset it is a bug in the compiler,
         so we assert here. */
      assert(Address::offset_ok_for_immed(addr.offset(), exact_log2(size_in_memory)),
             "c2 compiler bug");
      /* Fix up any out-of-range offsets. */
      assert_different_registers(rscratch1, base);
      assert_different_registers(rscratch1, reg);
      addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
    }
    (masm.*insn)(reg, addr);
  }

  // Floating-point variant of loadStore above.  Note that 'size' here
  // is the shift/extend amount applied to the index register, not the
  // access size ('size_in_memory').
  static void loadStore(C2_MacroAssembler masm, mem_float_insn insn,
                        FloatRegister reg, int opcode,
                        Register base, int index, int size, int disp,
                        int size_in_memory)
  {
    Address::extend scale;

    // Sign-extend indexes that originate from an int-to-long conversion.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      /* If we get an out-of-range offset it is a bug in the compiler,
         so we assert here. */
      assert(Address::offset_ok_for_immed(disp, exact_log2(size_in_memory)), "c2 compiler bug");
      /* Fix up any out-of-range offsets. */
      assert_different_registers(rscratch1, base);
      Address addr = Address(base, disp);
      addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
      (masm.*insn)(reg, addr);
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }

  // Vector variant of loadStore: no offset legitimization is performed.
  static void loadStore(C2_MacroAssembler masm, mem_vector_insn insn,
                        FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                        int opcode, Register base, int index, int size, int disp)
  {
    if (index == -1) {
      (masm.*insn)(reg, T, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode");
      (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
 2808 
 2809 %}
 2810 
 2811 
 2812 
 2813 //----------ENCODING BLOCK-----------------------------------------------------
 2814 // This block specifies the encoding classes used by the compiler to
 2815 // output byte streams.  Encoding classes are parameterized macros
 2816 // used by Machine Instruction Nodes in order to generate the bit
 2817 // encoding of the instruction.  Operands specify their base encoding
 2818 // interface with the interface keyword.  There are currently
 2819 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
 2820 // COND_INTER.  REG_INTER causes an operand to generate a function
 2821 // which returns its register number when queried.  CONST_INTER causes
 2822 // an operand to generate a function which returns the value of the
 2823 // constant when queried.  MEMORY_INTER causes an operand to generate
 2824 // four functions which return the Base Register, the Index Register,
 2825 // the Scale Value, and the Offset Value of the operand when queried.
 2826 // COND_INTER causes an operand to generate six functions which return
 2827 // the encoding code (ie - encoding bits for the instruction)
 2828 // associated with each basic boolean condition for a conditional
 2829 // instruction.
 2830 //
 2831 // Instructions specify two basic values for encoding.  Again, a
 2832 // function is available to check if the constant displacement is an
 2833 // oop. They use the ins_encode keyword to specify their encoding
 2834 // classes (which must be a sequence of enc_class names, and their
 2835 // parameters, specified in the encoding block), and they use the
 2836 // opcode keyword to specify, in order, their primary, secondary, and
 2837 // tertiary opcode.  Only the opcode sections which a particular
 2838 // instruction needs for encoding need to be specified.
 2839 encode %{
  // Build emit functions for each basic byte or larger field in the
  // instruction encoding, and call them from C++ code in the
  // enc_class source block.  Emit functions will live in the main
  // source block for now.  In future, we can generalize this by
  // adding a syntax that specifies the sizes of fields in an order,
  // so that the adlc can build the emit functions automagically
 2847 
  // catch all for unimplemented encodings: emits code that reports
  // "C2 catch all" if it is ever executed.
  enc_class enc_unimplemented %{
    C2_MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
 2853 
 2854   // BEGIN Non-volatile memory access
 2855 
 2856   // This encoding class is generated automatically from ad_encode.m4.
 2857   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2858   enc_class aarch64_enc_ldrsbw(iRegI dst, memory1 mem) %{
 2859     Register dst_reg = as_Register($dst$$reg);
 2860     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
 2861                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2862   %}
 2863 
 2864   // This encoding class is generated automatically from ad_encode.m4.
 2865   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2866   enc_class aarch64_enc_ldrsb(iRegI dst, memory1 mem) %{
 2867     Register dst_reg = as_Register($dst$$reg);
 2868     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
 2869                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2870   %}
 2871 
 2872   // This encoding class is generated automatically from ad_encode.m4.
 2873   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2874   enc_class aarch64_enc_ldrb(iRegI dst, memory1 mem) %{
 2875     Register dst_reg = as_Register($dst$$reg);
 2876     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
 2877                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2878   %}
 2879 
 2880   // This encoding class is generated automatically from ad_encode.m4.
 2881   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2882   enc_class aarch64_enc_ldrb(iRegL dst, memory1 mem) %{
 2883     Register dst_reg = as_Register($dst$$reg);
 2884     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
 2885                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2886   %}
 2887 
 2888   // This encoding class is generated automatically from ad_encode.m4.
 2889   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2890   enc_class aarch64_enc_ldrshw(iRegI dst, memory2 mem) %{
 2891     Register dst_reg = as_Register($dst$$reg);
 2892     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
 2893                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2894   %}
 2895 
 2896   // This encoding class is generated automatically from ad_encode.m4.
 2897   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2898   enc_class aarch64_enc_ldrsh(iRegI dst, memory2 mem) %{
 2899     Register dst_reg = as_Register($dst$$reg);
 2900     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
 2901                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2902   %}
 2903 
 2904   // This encoding class is generated automatically from ad_encode.m4.
 2905   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2906   enc_class aarch64_enc_ldrh(iRegI dst, memory2 mem) %{
 2907     Register dst_reg = as_Register($dst$$reg);
 2908     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
 2909                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2910   %}
 2911 
 2912   // This encoding class is generated automatically from ad_encode.m4.
 2913   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2914   enc_class aarch64_enc_ldrh(iRegL dst, memory2 mem) %{
 2915     Register dst_reg = as_Register($dst$$reg);
 2916     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
 2917                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2918   %}
 2919 
 2920   // This encoding class is generated automatically from ad_encode.m4.
 2921   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2922   enc_class aarch64_enc_ldrw(iRegI dst, memory4 mem) %{
 2923     Register dst_reg = as_Register($dst$$reg);
 2924     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
 2925                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2926   %}
 2927 
 2928   // This encoding class is generated automatically from ad_encode.m4.
 2929   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2930   enc_class aarch64_enc_ldrw(iRegL dst, memory4 mem) %{
 2931     Register dst_reg = as_Register($dst$$reg);
 2932     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
 2933                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2934   %}
 2935 
 2936   // This encoding class is generated automatically from ad_encode.m4.
 2937   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2938   enc_class aarch64_enc_ldrsw(iRegL dst, memory4 mem) %{
 2939     Register dst_reg = as_Register($dst$$reg);
 2940     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
 2941                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2942   %}
 2943 
 2944   // This encoding class is generated automatically from ad_encode.m4.
 2945   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2946   enc_class aarch64_enc_ldr(iRegL dst, memory8 mem) %{
 2947     Register dst_reg = as_Register($dst$$reg);
 2948     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
 2949                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 2950   %}
 2951 
 2952   // This encoding class is generated automatically from ad_encode.m4.
 2953   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2954   enc_class aarch64_enc_ldrs(vRegF dst, memory4 mem) %{
 2955     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
 2956     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
 2957                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2958   %}
 2959 
 2960   // This encoding class is generated automatically from ad_encode.m4.
 2961   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2962   enc_class aarch64_enc_ldrd(vRegD dst, memory8 mem) %{
 2963     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
 2964     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
 2965                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 2966   %}
 2967 
 2968   // This encoding class is generated automatically from ad_encode.m4.
 2969   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2970   enc_class aarch64_enc_strb(iRegI src, memory1 mem) %{
 2971     Register src_reg = as_Register($src$$reg);
 2972     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
 2973                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2974   %}
 2975 
 2976   // This encoding class is generated automatically from ad_encode.m4.
 2977   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2978   enc_class aarch64_enc_strb0(memory1 mem) %{
 2979     C2_MacroAssembler _masm(&cbuf);
 2980     loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
 2981                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2982   %}
 2983 
 2984   // This encoding class is generated automatically from ad_encode.m4.
 2985   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2986   enc_class aarch64_enc_strh(iRegI src, memory2 mem) %{
 2987     Register src_reg = as_Register($src$$reg);
 2988     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
 2989                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2990   %}
 2991 
 2992   // This encoding class is generated automatically from ad_encode.m4.
 2993   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2994   enc_class aarch64_enc_strh0(memory2 mem) %{
 2995     C2_MacroAssembler _masm(&cbuf);
 2996     loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
 2997                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2998   %}
 2999 
 3000   // This encoding class is generated automatically from ad_encode.m4.
 3001   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3002   enc_class aarch64_enc_strw(iRegI src, memory4 mem) %{
 3003     Register src_reg = as_Register($src$$reg);
 3004     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
 3005                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 3006   %}
 3007 
 3008   // This encoding class is generated automatically from ad_encode.m4.
 3009   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3010   enc_class aarch64_enc_strw0(memory4 mem) %{
 3011     C2_MacroAssembler _masm(&cbuf);
 3012     loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
 3013                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 3014   %}
 3015 
 3016   // This encoding class is generated automatically from ad_encode.m4.
 3017   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3018   enc_class aarch64_enc_str(iRegL src, memory8 mem) %{
 3019     Register src_reg = as_Register($src$$reg);
 3020     // we sometimes get asked to store the stack pointer into the
 3021     // current thread -- we cannot do that directly on AArch64
 3022     if (src_reg == r31_sp) {
 3023       C2_MacroAssembler _masm(&cbuf);
 3024       assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
 3025       __ mov(rscratch2, sp);
 3026       src_reg = rscratch2;
 3027     }
 3028     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
 3029                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 3030   %}
 3031 
 3032   // This encoding class is generated automatically from ad_encode.m4.
 3033   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3034   enc_class aarch64_enc_str0(memory8 mem) %{
 3035     C2_MacroAssembler _masm(&cbuf);
 3036     loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
 3037                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 3038   %}
 3039 
 3040   // This encoding class is generated automatically from ad_encode.m4.
 3041   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3042   enc_class aarch64_enc_strs(vRegF src, memory4 mem) %{
 3043     FloatRegister src_reg = as_FloatRegister($src$$reg);
 3044     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
 3045                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 3046   %}
 3047 
 3048   // This encoding class is generated automatically from ad_encode.m4.
 3049   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3050   enc_class aarch64_enc_strd(vRegD src, memory8 mem) %{
 3051     FloatRegister src_reg = as_FloatRegister($src$$reg);
 3052     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
 3053                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 3054   %}
 3055 
 3056   // This encoding class is generated automatically from ad_encode.m4.
 3057   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3058   enc_class aarch64_enc_strw_immn(immN src, memory1 mem) %{
 3059     C2_MacroAssembler _masm(&cbuf);
 3060     address con = (address)$src$$constant;
 3061     // need to do this the hard way until we can manage relocs
 3062     // for 32 bit constants
 3063     __ movoop(rscratch2, (jobject)con);
 3064     if (con) __ encode_heap_oop_not_null(rscratch2);
 3065     loadStore(_masm, &MacroAssembler::strw, rscratch2, $mem->opcode(),
 3066                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 3067   %}
 3068 
 3069   // This encoding class is generated automatically from ad_encode.m4.
 3070   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3071   enc_class aarch64_enc_strw_immnk(immN src, memory4 mem) %{
 3072     C2_MacroAssembler _masm(&cbuf);
 3073     address con = (address)$src$$constant;
 3074     // need to do this the hard way until we can manage relocs
 3075     // for 32 bit constants
 3076     __ movoop(rscratch2, (jobject)con);
 3077     __ encode_klass_not_null(rscratch2);
 3078     loadStore(_masm, &MacroAssembler::strw, rscratch2, $mem->opcode(),
 3079                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 3080   %}
 3081 
  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  // Store a zero byte to 'mem', preceded by a StoreStore barrier so prior
  // stores are visible before this one.
  enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
      C2_MacroAssembler _masm(&cbuf);
      __ membar(Assembler::StoreStore);
      loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}
 3090 
 3091   // END Non-volatile memory access
 3092 
  // Vector loads and stores.
  // Each class loads/stores one SIMD register with the given arrangement
  // width: S = 32-bit, D = 64-bit, Q = 128-bit. loadStore decodes the
  // addressing mode from the mem operand.

  // Load 32 bits into the low part of a SIMD register.
  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 64 bits into a SIMD register.
  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load a full 128-bit SIMD register.
  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store 32 bits from a SIMD register.
  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store 64 bits from a SIMD register.
  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store a full 128-bit SIMD register.
  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
 3129 
 3130   // volatile loads and stores
 3131 
  // Store-release of byte/halfword/word. MOV_VOLATILE forms the address
  // (via rscratch1 when base+index/disp cannot be encoded directly) and
  // emits the given release-store instruction.
  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}
 3146 
 3147 
  // Load-acquire encodings. ldarb/ldarh/ldarw zero-extend, so the signed
  // variants follow the load with an explicit sign-extension.

  // Load-acquire byte, sign-extended to 32 bits.
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // Load-acquire byte, sign-extended to 64 bits.
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  // Load-acquire byte, zero-extended (int result).
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Load-acquire byte, zero-extended (long result).
  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Load-acquire halfword, sign-extended to 32 bits.
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // Load-acquire halfword, sign-extended to 64 bits.
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // Load-acquire halfword, zero-extended (int result).
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Load-acquire halfword, zero-extended (long result).
  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Load-acquire word (int result).
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // Load-acquire word, zero-extended to long.
  // NOTE(review): this enc_class reuses the name aarch64_enc_ldarw with a
  // different operand class; appears deliberate (ADL matches on operands)
  // — confirm against ADLC behavior before renaming.
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // Load-acquire doubleword.
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // Load-acquire float: ldarw into rscratch1, then move to the FP register.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // Load-acquire double: ldar into rscratch1, then move to the FP register.
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
 3222 
  // Store-release of a 64-bit register.
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (sp is not a valid source for stlr), so copy sp through rscratch2.
    if (src_reg == r31_sp) {
      C2_MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // Store-release float: move the FP value to rscratch2, then stlrw.
  // The inner scope limits _masm's lifetime before MOV_VOLATILE creates
  // its own assembler.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      C2_MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // Store-release double: move the FP value to rscratch2, then stlr.
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      C2_MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
 3256 
 3257   // synchronized read/update encodings
 3258 
  // Load-acquire-exclusive of a 64-bit value. ldaxr only takes a base
  // register, so any index/displacement is folded into rscratch1 with lea
  // first.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // base + disp first, then add the scaled index
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
 3287 
  // Store-release-exclusive of a 64-bit value. Mirrors aarch64_enc_ldaxr's
  // address formation (in rscratch2 here; rscratch1 receives the status).
  // The final cmpw sets flags: EQ means the store succeeded (status == 0).
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    __ cmpw(rscratch1, zr);
  %}
 3317 
  // Compare-and-swap encodings (release semantics only; no acquire on the
  // load side). The matcher guarantees a plain base address, hence the
  // guarantee on index/disp. On return the flags reflect success (EQ).

  // 64-bit CAS.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit CAS.
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 16-bit CAS.
  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 8-bit CAS.
  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3349 
 3350 
  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.

  // 64-bit CAS with acquire+release semantics.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit CAS with acquire+release semantics.
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 16-bit CAS with acquire+release semantics.
  enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 8-bit CAS with acquire+release semantics.
  enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3386 
  // auxiliary used for CompareAndSwapX to set result register:
  // res = 1 if the preceding CAS left flags EQ (success), else 0.
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    C2_MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}
 3393 
 3394   // prefetch encodings
 3395 
 3396   enc_class aarch64_enc_prefetchw(memory mem) %{
 3397     C2_MacroAssembler _masm(&cbuf);
 3398     Register base = as_Register($mem$$base);
 3399     int index = $mem$$index;
 3400     int scale = $mem$$scale;
 3401     int disp = $mem$$disp;
 3402     if (index == -1) {
 3403       __ prfm(Address(base, disp), PSTL1KEEP);
 3404     } else {
 3405       Register index_reg = as_Register(index);
 3406       if (disp == 0) {
 3407         __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3408       } else {
 3409         __ lea(rscratch1, Address(base, disp));
 3410 	__ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3411       }
 3412     }
 3413   %}
 3414 
  // mov encodings
 3416 
  // Move a 32-bit immediate into 'dst'; zero uses the zero register as the
  // source instead of a materialized constant.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    C2_MacroAssembler _masm(&cbuf);
    uint32_t con = (uint32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // Move a 64-bit immediate into 'dst'; zero handled as above.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    uint64_t con = (uint64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}
 3438 
  // Move a pointer constant into 'dst', dispatching on its relocation type:
  // oops and metadata need relocatable moves; plain addresses below the VM
  // page size are emitted as immediates, larger ones via adrp+add.
  // NULL and (address)1 are matched by the immP0/immP_1 rules instead.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          // page-aligned base via adrp, then add the low bits
          uintptr_t offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
 3463 
  // Move pointer constant NULL (zero) into 'dst'.
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Move pointer constant 1 into 'dst'.
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (uint64_t)1);
  %}

  // Load the GC card table (byte map) base address into 'dst'.
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    C2_MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}
 3480 
  // Move a narrow-oop constant into 'dst' (NULL is matched by immN0 below).
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // Move narrow-oop constant NULL (zero) into 'dst'.
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Move a narrow-klass constant into 'dst'.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
 3512 
 3513   // arithmetic encodings
 3514 
  // 32-bit add/subtract of an immediate. One enc_class serves both ops:
  // $primary distinguishes them (add == 0, subtract == 1) and the constant
  // is negated for subtract, then the sign picks addw vs subw so the
  // emitted immediate is always non-negative.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // 64-bit variant of the above. The constant is narrowed to int32_t —
  // presumably safe because immLAddSub restricts the operand's range;
  // confirm against the operand definition if changing.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
 3542 
  // Division/remainder via corrected_idiv{l,q}: the helper handles the
  // min-int / -1 overflow case per Java semantics. The final bool argument
  // selects remainder (true) vs quotient (false).

  // 32-bit divide.
  enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
    C2_MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 64-bit divide (operand classes are loosely typed here; the matching
  // instruct rule supplies long registers).
  enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
    C2_MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 32-bit remainder.
  enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
    C2_MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}

  // 64-bit remainder.
  enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
    C2_MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}
 3574 
 3575   // compare instruction encodings
 3576 
  // 32-bit register-register compare (sets flags only).
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare against an add/sub-encodable immediate: emitted as a
  // flag-setting subtract (or add for negative values) discarding the
  // result into zr.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare against an arbitrary immediate: materialize it in
  // rscratch1 first.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    uint32_t val = (uint32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}
 3602 
  // 64-bit register-register compare.
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare against a 12-bit add/sub immediate.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
      // aargh, Long.MIN_VALUE is a special case: it equals its own
      // negation, so materialize it in rscratch1 and compare.
      __ orr(rscratch1, zr, (uint64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare against an arbitrary immediate via rscratch1.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    uint64_t val = (uint64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}
 3632 
  // Pointer compare (64-bit).
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // Narrow-oop compare (32-bit).
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // Test a pointer against NULL (compare with zr).
  enc_class aarch64_enc_testp(iRegP src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // Test a narrow oop against NULL (32-bit compare with zr).
  enc_class aarch64_enc_testn(iRegN src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
 3658 
  // Unconditional branch to 'lbl'.
  enc_class aarch64_enc_b(label lbl) %{
    C2_MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // Conditional branch; the condition code comes from the cmpOp operand.
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    C2_MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Unsigned-comparison variant; identical emission, the operand class
  // supplies unsigned condition codes.
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    C2_MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
 3676 
  // Slow-path subtype check: walks the secondary supers of 'sub' looking
  // for 'super'. Falls through on success; branches to 'miss' on failure.
  // When $primary is set, result is zeroed on the success path so callers
  // can test it.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     C2_MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
 3694 
  // Emit a static Java call (or a runtime-stub call when no resolved
  // method is attached). Real method calls also get a to-interpreter stub.
  // On code-cache exhaustion, records a bailout and returns early.
  enc_class aarch64_enc_java_static_call(method meth) %{
    C2_MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      int method_index = resolved_method_index(cbuf);
      // opt_virtual for calls devirtualized by the optimizer, static otherwise
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
 3721 
  // Emit a dynamic (inline-cache) Java call. Bails out on code-cache
  // exhaustion like the static-call encoding.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    C2_MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
 3731 
  // Post-call epilogue. The VerifyStackAtCalls check is not implemented on
  // AArch64 yet, hence the call_Unimplemented() placeholder.
  enc_class aarch64_enc_call_epilog() %{
    C2_MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
 3739 
  // Call from compiled Java code into the runtime.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    C2_MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blr
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc():
      // push the return address (paired with zr to keep sp 16-byte aligned),
      // popped again after the call returns.
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blr(rscratch1);
      __ bind(retaddr);
      __ add(sp, sp, 2 * wordSize);
    }
  %}
 3766 
  // Jump to the shared rethrow stub (exception oop handling done there).
  enc_class aarch64_enc_rethrow() %{
    C2_MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}

  // Method return.
  enc_class aarch64_enc_ret() %{
    C2_MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}

  // Tail call: indirect jump to the target, frame already torn down.
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    C2_MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}

  // Tail jump used for forwarding an exception.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    C2_MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
 3792 
  // Inline fast path of monitorenter. Attempts (in order): biased locking,
  // stack-lock CAS, recursive stack-lock, then CAS on the inflated
  // monitor's owner field. Flags communicate the outcome to the matching
  // instruct rule: EQ = locked, NE = must call the runtime slow path.
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markWord from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Check for existing monitor
    __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);

    // Set tmp to be (markWord of object | UNLOCK_VALUE).
    __ orr(tmp, disp_hdr, markWord::unlocked_value);

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markWord with an unlocked value (tmp) and if
    // equal exchange the stack address of our box with object markWord.
    // On failure disp_hdr contains the possibly locked markWord.
    __ cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
               /*release*/ true, /*weak*/ false, disp_hdr);
    __ br(Assembler::EQ, cont);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, have now locked it, and will continue at label cont.

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markWord of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);   // Sets flags for result
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    __ b(cont);

    // Handle existing monitor.
    __ bind(object_has_monitor);

    // The object's monitor m is unlocked iff m->owner == NULL,
    // otherwise m->owner may contain a thread or a stack address.
    //
    // Try to CAS m->owner from NULL to current thread.
    __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markWord::monitor_value));
    __ cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
               /*release*/ true, /*weak*/ false, noreg); // Sets flags for result
    // Store a non-null value into the box to avoid looking like a re-entrant
    // lock. The fast-path monitor unlock code checks for
    // markWord::monitor_value so use markWord::unused_mark which has the
    // relevant bit set, and also matches ObjectSynchronizer::enter.
    __ mov(tmp, (address)markWord::unused_mark().value());
    __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
 3870 
  // Fast-path monitor exit for C2-compiled code.  Mirrors
  // aarch64_enc_fast_lock: on exit, flag == EQ means the unlock
  // succeeded and flag == NE means we must call into the runtime.
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;

    assert_different_registers(oop, box, tmp, disp_hdr);

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    // (cmp rather than cbz so that the EQ flag is what reaches cont.)
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);

    // Handle existing monitor.
    __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
    __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markWord of the
    // object.

    // Try to restore the displaced header; cmpxchg sets EQ on success.
    __ cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
               /*release*/ true, /*weak*/ false, tmp);
    __ b(cont);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // Handle existing monitor.
    __ bind(object_has_monitor);
    STATIC_ASSERT(markWord::monitor_value <= INT_MAX);
    __ add(tmp, tmp, -(int)markWord::monitor_value); // monitor
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
    __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
    __ cmp(rscratch1, zr); // Sets flags for result
    __ br(Assembler::NE, cont);

    // We own the monitor and there are no recursions: if either the
    // EntryList or cxq is non-empty, take the slow path (flag stays NE
    // via cbnz only if rscratch1 != 0; cmp above set the flags).
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
    __ cmp(rscratch1, zr); // Sets flags for result
    __ cbnz(rscratch1, cont);
    // need a release store here
    __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ stlr(zr, tmp); // set unowned

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
 3931 
 3932 %}
 3933 
 3934 //----------FRAME--------------------------------------------------------------
 3935 // Definition of frame structure and management information.
 3936 //
 3937 //  S T A C K   L A Y O U T    Allocators stack-slot number
 3938 //                             |   (to get allocators register number
 3939 //  G  Owned by    |        |  v    add OptoReg::stack0())
 3940 //  r   CALLER     |        |
 3941 //  o     |        +--------+      pad to even-align allocators stack-slot
 3942 //  w     V        |  pad0  |        numbers; owned by CALLER
 3943 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
 3944 //  h     ^        |   in   |  5
 3945 //        |        |  args  |  4   Holes in incoming args owned by SELF
 3946 //  |     |        |        |  3
 3947 //  |     |        +--------+
 3948 //  V     |        | old out|      Empty on Intel, window on Sparc
 3949 //        |    old |preserve|      Must be even aligned.
 3950 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
 3951 //        |        |   in   |  3   area for Intel ret address
 3952 //     Owned by    |preserve|      Empty on Sparc.
 3953 //       SELF      +--------+
 3954 //        |        |  pad2  |  2   pad to align old SP
 3955 //        |        +--------+  1
 3956 //        |        | locks  |  0
 3957 //        |        +--------+----> OptoReg::stack0(), even aligned
 3958 //        |        |  pad1  | 11   pad to align new SP
 3959 //        |        +--------+
 3960 //        |        |        | 10
 3961 //        |        | spills |  9   spills
 3962 //        V        |        |  8   (pad0 slot for callee)
 3963 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
 3964 //        ^        |  out   |  7
 3965 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
 3966 //     Owned by    +--------+
 3967 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
 3968 //        |    new |preserve|      Must be even-aligned.
 3969 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
 3970 //        |        |        |
 3971 //
 3972 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
 3973 //         known from SELF's arguments and the Java calling convention.
 3974 //         Region 6-7 is determined per call site.
 3975 // Note 2: If the calling convention leaves holes in the incoming argument
 3976 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
 3978 //         incoming area, as the Java calling convention is completely under
 3979 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
 3981 //         varargs C calling conventions.
 3982 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
 3983 //         even aligned with pad0 as needed.
 3984 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
 3985 //           (the latter is true on Intel but is it false on AArch64?)
 3986 //         region 6-11 is even aligned; it may be padded out more so that
 3987 //         the region from SP to FP meets the minimum stack alignment.
 3988 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
 3989 //         alignment.  Region 11, pad1, may be dynamically extended so that
 3990 //         SP meets the minimum alignment.
 3991 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or Method for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // lo[] gives the low (or only) register half for each ideal
    // register kind; hi[] gives the upper half, or OptoReg::Bad for
    // kinds that occupy a single 32-bit half.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
 4095 
//----------ATTRIBUTES---------------------------------------------------------
// Default attribute values; individual operands/instructions may override.
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
 4113 
 4114 //----------OPERANDS-----------------------------------------------------------
 4115 // Operand definitions must precede instruction definitions for correct parsing
 4116 // in the ADLC because operands constitute user defined types which are used in
 4117 // instruction definitions.
 4118 
 4119 //----------Simple Operands----------------------------------------------------
 4120 
// Integer operands 32 bit
// 32 bit immediate (any value)
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift values for add/sub extension shift (0..4 inclusive)
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4175 
// 32 bit int <= 4 (note: no lower bound, unlike immIExt)
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the int constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the int constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the int constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the int constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the int constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the int constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the int constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the int constant 63
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the int constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the int constant 255 (0xFF, low-byte mask)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the int constant 65535 (0xFFFF, low-halfword mask)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4295 
// the long constant 255 (0xFF)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the long constant 65535 (0xFFFF)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the long constant 4294967295 (0xFFFFFFFF, low-word mask)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long mask of contiguous low-order one bits: nonzero, top two bits
// clear, and value+1 is a power of 2.
operand immL_bitmask()
%{
  predicate((n->get_long() != 0)
            && ((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Int mask of contiguous low-order one bits: nonzero, top two bits
// clear, and value+1 is a power of 2.
operand immI_bitmask()
%{
  predicate((n->get_int() != 0)
            && ((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long mask of contiguous low-order one bits that also fits in a
// positive 32-bit int: nonzero, < 2^31, and value+1 is a power of 2.
operand immL_positive_bitmaskI()
%{
  predicate((n->get_long() != 0)
            && ((julong)n->get_long() < 0x80000000ULL)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4361 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset (long variant of immIU12)
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4415 
// Offset for scaled or unscaled immediate loads and stores
// The second argument to offset_ok_for_immed is the log2 of the
// access size the offset will be scaled by.
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 1-byte access (same predicate as immIOffset)
operand immIOffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 2-byte access
operand immIOffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 4-byte access
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an 8-byte access
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 16-byte access
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long variants of the offset operands above
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 1-byte access (same predicate as immLoffset)
operand immLoffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 2-byte access
operand immLoffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 4-byte access
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an 8-byte access
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 16-byte access
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4536 
// 32 bit integer valid for add sub immediate
// (encodable in an AArch64 ADD/SUB immediate field)
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((int64_t)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// (encodable in an AArch64 AND/ORR/EOR bitmask-immediate field)
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4558 
// Integer operands 64 bit
// 64 bit immediate (any value)
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit offset of pc in thread anchor
// (the byte offset of last_Java_pc within JavaThread)

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (uint64_t)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4645 
// Pointer operands
// Pointer Immediate (any value)
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            (CardTable::CardValue*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
// NOTE(review): comment duplicated from immP_M1 — confirm actual use of -2
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4716 
// Float and Double operands
// Double Immediate (any value)
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: value representable as an AArch64 floating-point
// immediate (see Assembler::operand_valid_for_float_immediate)
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate (any value)
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: value representable as an AArch64 floating-point
// immediate (see Assembler::operand_valid_for_float_immediate)
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4777 
// Narrow pointer operands
// Narrow Pointer Immediate (any value)
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4808 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  format %{ %}
  interface(REG_INTER);
%}
 4852 
// Pointer Register Operands
// Pointer Register
// The extra match() clauses let the matcher accept the more
// restricted pointer operands wherever a general iRegP is expected.
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4969 
 4970 // Long 64 bit Register R0 only
 4971 operand iRegL_R0()
 4972 %{
 4973   constraint(ALLOC_IN_RC(r0_reg));
 4974   match(RegL);
 4975   match(iRegLNoSp);
 4976   op_cost(0);
 4977   format %{ %}
 4978   interface(REG_INTER);
 4979 %}
 4980 
 4981 // Long 64 bit Register R2 only
 4982 operand iRegL_R2()
 4983 %{
 4984   constraint(ALLOC_IN_RC(r2_reg));
 4985   match(RegL);
 4986   match(iRegLNoSp);
 4987   op_cost(0);
 4988   format %{ %}
 4989   interface(REG_INTER);
 4990 %}
 4991 
 4992 // Long 64 bit Register R3 only
 4993 operand iRegL_R3()
 4994 %{
 4995   constraint(ALLOC_IN_RC(r3_reg));
 4996   match(RegL);
 4997   match(iRegLNoSp);
 4998   op_cost(0);
 4999   format %{ %}
 5000   interface(REG_INTER);
 5001 %}
 5002 
 5003 // Long 64 bit Register R11 only
 5004 operand iRegL_R11()
 5005 %{
 5006   constraint(ALLOC_IN_RC(r11_reg));
 5007   match(RegL);
 5008   match(iRegLNoSp);
 5009   op_cost(0);
 5010   format %{ %}
 5011   interface(REG_INTER);
 5012 %}
 5013 
 5014 // Pointer 64 bit Register FP only
 5015 operand iRegP_FP()
 5016 %{
 5017   constraint(ALLOC_IN_RC(fp_reg));
 5018   match(RegP);
 5019   // match(iRegP);
 5020   op_cost(0);
 5021   format %{ %}
 5022   interface(REG_INTER);
 5023 %}
 5024 
 5025 // Register R0 only
 5026 operand iRegI_R0()
 5027 %{
 5028   constraint(ALLOC_IN_RC(int_r0_reg));
 5029   match(RegI);
 5030   match(iRegINoSp);
 5031   op_cost(0);
 5032   format %{ %}
 5033   interface(REG_INTER);
 5034 %}
 5035 
 5036 // Register R2 only
 5037 operand iRegI_R2()
 5038 %{
 5039   constraint(ALLOC_IN_RC(int_r2_reg));
 5040   match(RegI);
 5041   match(iRegINoSp);
 5042   op_cost(0);
 5043   format %{ %}
 5044   interface(REG_INTER);
 5045 %}
 5046 
 5047 // Register R3 only
 5048 operand iRegI_R3()
 5049 %{
 5050   constraint(ALLOC_IN_RC(int_r3_reg));
 5051   match(RegI);
 5052   match(iRegINoSp);
 5053   op_cost(0);
 5054   format %{ %}
 5055   interface(REG_INTER);
 5056 %}
 5057 
 5058 
 5059 // Register R4 only
 5060 operand iRegI_R4()
 5061 %{
 5062   constraint(ALLOC_IN_RC(int_r4_reg));
 5063   match(RegI);
 5064   match(iRegINoSp);
 5065   op_cost(0);
 5066   format %{ %}
 5067   interface(REG_INTER);
 5068 %}
 5069 
 5070 
// Narrow (compressed) Pointer Register Operands
// Narrow Pointer Register — 32-bit encoding of a compressed oop.
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow pointer pinned to R0
operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow pointer pinned to R2
operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow pointer pinned to R3
operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer 32 bit Register not Special
// (comment previously said "Integer 64 bit" — this operand matches RegN,
// the compressed-oop type, and allocates from the 32-bit register class.)
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5130 
// Float Register
// Float register operands (any allocatable FP/SIMD register, S view)
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands (any allocatable FP/SIMD register, D view)
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Scalable vector register operand (SVE)
operand vecA()
%{
  constraint(ALLOC_IN_RC(vectora_reg));
  match(VecA);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64-bit (D-sized) NEON vector register operand
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128-bit (Q/X-sized) NEON vector register operand
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5183 
// ---- Double operands pinned to individual V registers (v0..v31) ---------
// One operand per physical FP/SIMD register, used where stubs/intrinsics
// need values in specific V registers.
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V4()
%{
  constraint(ALLOC_IN_RC(v4_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V5()
%{
  constraint(ALLOC_IN_RC(v5_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V6()
%{
  constraint(ALLOC_IN_RC(v6_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V7()
%{
  constraint(ALLOC_IN_RC(v7_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V8()
%{
  constraint(ALLOC_IN_RC(v8_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V9()
%{
  constraint(ALLOC_IN_RC(v9_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V10()
%{
  constraint(ALLOC_IN_RC(v10_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V11()
%{
  constraint(ALLOC_IN_RC(v11_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V12()
%{
  constraint(ALLOC_IN_RC(v12_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V13()
%{
  constraint(ALLOC_IN_RC(v13_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V14()
%{
  constraint(ALLOC_IN_RC(v14_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V15()
%{
  constraint(ALLOC_IN_RC(v15_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V16()
%{
  constraint(ALLOC_IN_RC(v16_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V17()
%{
  constraint(ALLOC_IN_RC(v17_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V18()
%{
  constraint(ALLOC_IN_RC(v18_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V19()
%{
  constraint(ALLOC_IN_RC(v19_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V20()
%{
  constraint(ALLOC_IN_RC(v20_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V21()
%{
  constraint(ALLOC_IN_RC(v21_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V22()
%{
  constraint(ALLOC_IN_RC(v22_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V23()
%{
  constraint(ALLOC_IN_RC(v23_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V24()
%{
  constraint(ALLOC_IN_RC(v24_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V25()
%{
  constraint(ALLOC_IN_RC(v25_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V26()
%{
  constraint(ALLOC_IN_RC(v26_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V27()
%{
  constraint(ALLOC_IN_RC(v27_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V28()
%{
  constraint(ALLOC_IN_RC(v28_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V29()
%{
  constraint(ALLOC_IN_RC(v29_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V30()
%{
  constraint(ALLOC_IN_RC(v30_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V31()
%{
  constraint(ALLOC_IN_RC(v31_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// SVE governing predicate register operand
operand pRegGov()
%{
  constraint(ALLOC_IN_RC(gov_pr));
  match(RegVMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5480 
 5481 // Flags register, used as output of signed compare instructions
 5482 
// note that on AArch64 we also use this register as the output for
// floating point compare instructions (CmpF CmpD). this ensures
 5485 // that ordered inequality tests use GT, GE, LT or LE none of which
 5486 // pass through cases where the result is unordered i.e. one or both
 5487 // inputs to the compare is a NaN. this means that the ideal code can
 5488 // replace e.g. a GT with an LE and not end up capturing the NaN case
 5489 // (where the comparison should always fail). EQ and NE tests are
 5490 // always generated in ideal code so that unordered folds into the NE
 5491 // case, matching the behaviour of AArch64 NE.
 5492 //
 5493 // This differs from x86 where the outputs of FP compares use a
 5494 // special FP flags registers and where compares based on this
 5495 // register are distinguished into ordered inequalities (cmpOpUCF) and
 5496 // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
 5497 // to explicitly handle the unordered case in branches. x86 also has
 5498 // to include extra CMoveX rules to accept a cmpOpUCF input.
 5499 
// Condition flags (NZCV), consumed by signed and FP compare users.
operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}

// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg (was mislabelled "link_reg")
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5562 
//----------Memory Operands----------------------------------------------------
// In MEMORY_INTER, index(0xffffffff) is the sentinel for "no index register".

// Base-register-only addressing: [reg]
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// Base + sign-extended-int index, scaled: [reg, ireg sxtw #scale]
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// Base + long index, scaled: [reg, lreg lsl #scale]
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// Base + sign-extended-int index, unscaled
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Base + long index, unscaled
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
 5636 
// ---- Base + immediate-offset addressing: [reg, #off] ---------------------
// The numeric suffix (1/2/4/8/16) selects an immediate-offset operand whose
// range fits the corresponding access size; I vs L distinguishes int- vs
// long-typed offset constants.

operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI1(iRegP reg, immIOffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI2(iRegP reg, immIOffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL1(iRegP reg, immLoffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL2(iRegP reg, immLoffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5804 
// ---- Narrow-oop (compressed pointer) memory operands ---------------------
// Only valid when CompressedOops::shift() == 0, i.e. the narrow oop is the
// address bits directly and DecodeN is free.

operand indirectN(iRegN reg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5909 
 5910 
 5911 
// AArch64 opto stubs need to write to the pc slot in the thread anchor.
// Addressing form: [thread_reg, #immL_pc_off] — base is the thread register,
// displacement is the anchor pc-slot offset.
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5926 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory. base(0x1e) encodes SP; the stack
//                      slot register supplies the displacement.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
 6001 
 6002 // Operands for expressing Control Flow
 6003 // NOTE: Label is a predefined operand which should not be redefined in
 6004 //       the AD file. It is generically handled within the ADLC.
 6005 
 6006 //----------Conditional Branch Operands----------------------------------------
 6007 // Comparison Op  - This is the operation of the comparison, and is limited to
 6008 //                  the following set of codes:
 6009 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
 6010 //
 6011 // Other attributes of the comparison, such as unsignedness, are specified
 6012 // by the comparison instruction that sets a condition code flags register.
 6013 // That result is represented by a flags operand whose subtype is appropriate
 6014 // to the unsignedness (etc.) of the comparison.
 6015 //
 6016 // Later, the instruction which matches both the Comparison Op (a Bool) and
 6017 // the flags (produced by the Cmp) specifies the coding of the comparison op
 6018 // by matching a specific subtype of Bool operand below, such as cmpOpU.
 6019 
 6020 // used for signed integral comparisons and fp comparisons
 6021 
// The numeric values in the COND_INTER clauses are the AArch64 condition
// code encodings (eq=0x0, ne=0x1, ge=0xa, lt=0xb, gt=0xc, le=0xd,
// vs=0x6, vc=0x7; unsigned: hs=0x2, lo=0x3, hi=0x8, ls=0x9).

// used for signed integral comparisons and fp comparisons
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

operand cmpOpEqNe()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

operand cmpOpLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions

operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 6130 
 6131 // Special operand allowing long args to int ops to be truncated for free
 6132 
 6133 operand iRegL2I(iRegL reg) %{
 6134 
 6135   op_cost(0);
 6136 
 6137   match(ConvL2I reg);
 6138 
 6139   format %{ "l2i($reg)" %}
 6140 
 6141   interface(REG_INTER)
 6142 %}
 6143 
 6144 opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
 6145 opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
 6146 opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
 6147 
 6148 //----------OPERAND CLASSES----------------------------------------------------
 6149 // Operand Classes are groups of operands that are used as to simplify
 6150 // instruction definitions by not requiring the AD writer to specify
 6151 // separate instructions for every form of operand when the
 6152 // instruction accepts multiple operand types with the same basic
 6153 // encoding and format. The classic case of this is memory operands.
 6154 
 6155 // memory is used to define read/write location for load/store
 6156 // instruction defs. we can turn a memory op into an Address
 6157 
 6158 opclass memory1(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI1, indOffL1,
 6159                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN);
 6160 
 6161 opclass memory2(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI2, indOffL2,
 6162                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN);
 6163 
 6164 opclass memory4(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI4, indOffL4,
 6165                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);
 6166 
 6167 opclass memory8(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI8, indOffL8,
 6168                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);
 6169 
 6170 // All of the memory operands. For the pipeline description.
 6171 opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex,
 6172                indOffI1, indOffL1, indOffI2, indOffL2, indOffI4, indOffL4, indOffI8, indOffL8,
 6173                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);
 6174 
 6175 
 6176 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
 6177 // operations. it allows the src to be either an iRegI or a (ConvL2I
 6178 // iRegL). in the latter case the l2i normally planted for a ConvL2I
 6179 // can be elided because the 32-bit instruction will just employ the
 6180 // lower 32 bits anyway.
 6181 //
 6182 // n.b. this does not elide all L2I conversions. if the truncated
 6183 // value is consumed by more than one operation then the ConvL2I
 6184 // cannot be bundled into the consuming nodes so an l2i gets planted
 6185 // (actually a movw $dst $src) and the downstream instructions consume
 6186 // the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but it's not too costly.
 6188 
 6189 opclass iRegIorL2I(iRegI, iRegL2I);
 6190 
 6191 //----------PIPELINE-----------------------------------------------------------
 6192 // Rules which define the behavior of the target architectures pipeline.
 6193 
 6194 // For specific pipelines, eg A53, define the stages of that pipeline
 6195 //pipe_desc(ISS, EX1, EX2, WR);
 6196 #define ISS S0
 6197 #define EX1 S1
 6198 #define EX2 S2
 6199 #define WR  S3
 6200 
 6201 // Integer ALU reg operation
 6202 pipeline %{
 6203 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
 6216 
 6217 // We don't use an actual pipeline model so don't care about resources
 6218 // or description. we do use pipeline classes to introduce fixed
 6219 // latencies
 6220 
 6221 //----------RESOURCES----------------------------------------------------------
 6222 // Resources are the functional units available to the machine
 6223 
 6224 resources( INS0, INS1, INS01 = INS0 | INS1,
 6225            ALU0, ALU1, ALU = ALU0 | ALU1,
 6226            MAC,
 6227            DIV,
 6228            BRANCH,
 6229            LDST,
 6230            NEON_FP);
 6231 
 6232 //----------PIPELINE DESCRIPTION-----------------------------------------------
 6233 // Pipeline Description specifies the stages in the machine's pipeline
 6234 
 6235 // Define the pipeline as a generic 6 stage pipeline
 6236 pipe_desc(S0, S1, S2, S3, S4, S5);
 6237 
 6238 //----------PIPELINE CLASSES---------------------------------------------------
 6239 // Pipeline Classes describe the stages in which input and output are
 6240 // referenced by the hardware pipeline.
 6241 
 6242 pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
 6243 %{
 6244   single_instruction;
 6245   src1   : S1(read);
 6246   src2   : S2(read);
 6247   dst    : S5(write);
 6248   INS01  : ISS;
 6249   NEON_FP : S5;
 6250 %}
 6251 
 6252 pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
 6253 %{
 6254   single_instruction;
 6255   src1   : S1(read);
 6256   src2   : S2(read);
 6257   dst    : S5(write);
 6258   INS01  : ISS;
 6259   NEON_FP : S5;
 6260 %}
 6261 
 6262 pipe_class fp_uop_s(vRegF dst, vRegF src)
 6263 %{
 6264   single_instruction;
 6265   src    : S1(read);
 6266   dst    : S5(write);
 6267   INS01  : ISS;
 6268   NEON_FP : S5;
 6269 %}
 6270 
 6271 pipe_class fp_uop_d(vRegD dst, vRegD src)
 6272 %{
 6273   single_instruction;
 6274   src    : S1(read);
 6275   dst    : S5(write);
 6276   INS01  : ISS;
 6277   NEON_FP : S5;
 6278 %}
 6279 
 6280 pipe_class fp_d2f(vRegF dst, vRegD src)
 6281 %{
 6282   single_instruction;
 6283   src    : S1(read);
 6284   dst    : S5(write);
 6285   INS01  : ISS;
 6286   NEON_FP : S5;
 6287 %}
 6288 
 6289 pipe_class fp_f2d(vRegD dst, vRegF src)
 6290 %{
 6291   single_instruction;
 6292   src    : S1(read);
 6293   dst    : S5(write);
 6294   INS01  : ISS;
 6295   NEON_FP : S5;
 6296 %}
 6297 
 6298 pipe_class fp_f2i(iRegINoSp dst, vRegF src)
 6299 %{
 6300   single_instruction;
 6301   src    : S1(read);
 6302   dst    : S5(write);
 6303   INS01  : ISS;
 6304   NEON_FP : S5;
 6305 %}
 6306 
 6307 pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
 6308 %{
 6309   single_instruction;
 6310   src    : S1(read);
 6311   dst    : S5(write);
 6312   INS01  : ISS;
 6313   NEON_FP : S5;
 6314 %}
 6315 
 6316 pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
 6317 %{
 6318   single_instruction;
 6319   src    : S1(read);
 6320   dst    : S5(write);
 6321   INS01  : ISS;
 6322   NEON_FP : S5;
 6323 %}
 6324 
 6325 pipe_class fp_l2f(vRegF dst, iRegL src)
 6326 %{
 6327   single_instruction;
 6328   src    : S1(read);
 6329   dst    : S5(write);
 6330   INS01  : ISS;
 6331   NEON_FP : S5;
 6332 %}
 6333 
 6334 pipe_class fp_d2i(iRegINoSp dst, vRegD src)
 6335 %{
 6336   single_instruction;
 6337   src    : S1(read);
 6338   dst    : S5(write);
 6339   INS01  : ISS;
 6340   NEON_FP : S5;
 6341 %}
 6342 
 6343 pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
 6344 %{
 6345   single_instruction;
 6346   src    : S1(read);
 6347   dst    : S5(write);
 6348   INS01  : ISS;
 6349   NEON_FP : S5;
 6350 %}
 6351 
 6352 pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
 6353 %{
 6354   single_instruction;
 6355   src    : S1(read);
 6356   dst    : S5(write);
 6357   INS01  : ISS;
 6358   NEON_FP : S5;
 6359 %}
 6360 
 6361 pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
 6362 %{
 6363   single_instruction;
 6364   src    : S1(read);
 6365   dst    : S5(write);
 6366   INS01  : ISS;
 6367   NEON_FP : S5;
 6368 %}
 6369 
 6370 pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
 6371 %{
 6372   single_instruction;
 6373   src1   : S1(read);
 6374   src2   : S2(read);
 6375   dst    : S5(write);
 6376   INS0   : ISS;
 6377   NEON_FP : S5;
 6378 %}
 6379 
 6380 pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
 6381 %{
 6382   single_instruction;
 6383   src1   : S1(read);
 6384   src2   : S2(read);
 6385   dst    : S5(write);
 6386   INS0   : ISS;
 6387   NEON_FP : S5;
 6388 %}
 6389 
 6390 pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
 6391 %{
 6392   single_instruction;
 6393   cr     : S1(read);
 6394   src1   : S1(read);
 6395   src2   : S1(read);
 6396   dst    : S3(write);
 6397   INS01  : ISS;
 6398   NEON_FP : S3;
 6399 %}
 6400 
 6401 pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
 6402 %{
 6403   single_instruction;
 6404   cr     : S1(read);
 6405   src1   : S1(read);
 6406   src2   : S1(read);
 6407   dst    : S3(write);
 6408   INS01  : ISS;
 6409   NEON_FP : S3;
 6410 %}
 6411 
 6412 pipe_class fp_imm_s(vRegF dst)
 6413 %{
 6414   single_instruction;
 6415   dst    : S3(write);
 6416   INS01  : ISS;
 6417   NEON_FP : S3;
 6418 %}
 6419 
 6420 pipe_class fp_imm_d(vRegD dst)
 6421 %{
 6422   single_instruction;
 6423   dst    : S3(write);
 6424   INS01  : ISS;
 6425   NEON_FP : S3;
 6426 %}
 6427 
 6428 pipe_class fp_load_constant_s(vRegF dst)
 6429 %{
 6430   single_instruction;
 6431   dst    : S4(write);
 6432   INS01  : ISS;
 6433   NEON_FP : S4;
 6434 %}
 6435 
 6436 pipe_class fp_load_constant_d(vRegD dst)
 6437 %{
 6438   single_instruction;
 6439   dst    : S4(write);
 6440   INS01  : ISS;
 6441   NEON_FP : S4;
 6442 %}
 6443 
 6444 pipe_class vmul64(vecD dst, vecD src1, vecD src2)
 6445 %{
 6446   single_instruction;
 6447   dst    : S5(write);
 6448   src1   : S1(read);
 6449   src2   : S1(read);
 6450   INS01  : ISS;
 6451   NEON_FP : S5;
 6452 %}
 6453 
 6454 pipe_class vmul128(vecX dst, vecX src1, vecX src2)
 6455 %{
 6456   single_instruction;
 6457   dst    : S5(write);
 6458   src1   : S1(read);
 6459   src2   : S1(read);
 6460   INS0   : ISS;
 6461   NEON_FP : S5;
 6462 %}
 6463 
// 64-bit vector multiply-accumulate.
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  // dst is also an input: a multiply-accumulate reads the accumulator.
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}
 6474 
 6475 pipe_class vmla128(vecX dst, vecX src1, vecX src2)
 6476 %{
 6477   single_instruction;
 6478   dst    : S5(write);
 6479   src1   : S1(read);
 6480   src2   : S1(read);
 6481   dst    : S1(read);
 6482   INS0   : ISS;
 6483   NEON_FP : S5;
 6484 %}
 6485 
 6486 pipe_class vdop64(vecD dst, vecD src1, vecD src2)
 6487 %{
 6488   single_instruction;
 6489   dst    : S4(write);
 6490   src1   : S2(read);
 6491   src2   : S2(read);
 6492   INS01  : ISS;
 6493   NEON_FP : S4;
 6494 %}
 6495 
 6496 pipe_class vdop128(vecX dst, vecX src1, vecX src2)
 6497 %{
 6498   single_instruction;
 6499   dst    : S4(write);
 6500   src1   : S2(read);
 6501   src2   : S2(read);
 6502   INS0   : ISS;
 6503   NEON_FP : S4;
 6504 %}
 6505 
 6506 pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
 6507 %{
 6508   single_instruction;
 6509   dst    : S3(write);
 6510   src1   : S2(read);
 6511   src2   : S2(read);
 6512   INS01  : ISS;
 6513   NEON_FP : S3;
 6514 %}
 6515 
 6516 pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
 6517 %{
 6518   single_instruction;
 6519   dst    : S3(write);
 6520   src1   : S2(read);
 6521   src2   : S2(read);
 6522   INS0   : ISS;
 6523   NEON_FP : S3;
 6524 %}
 6525 
 6526 pipe_class vshift64(vecD dst, vecD src, vecX shift)
 6527 %{
 6528   single_instruction;
 6529   dst    : S3(write);
 6530   src    : S1(read);
 6531   shift  : S1(read);
 6532   INS01  : ISS;
 6533   NEON_FP : S3;
 6534 %}
 6535 
 6536 pipe_class vshift128(vecX dst, vecX src, vecX shift)
 6537 %{
 6538   single_instruction;
 6539   dst    : S3(write);
 6540   src    : S1(read);
 6541   shift  : S1(read);
 6542   INS0   : ISS;
 6543   NEON_FP : S3;
 6544 %}
 6545 
 6546 pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
 6547 %{
 6548   single_instruction;
 6549   dst    : S3(write);
 6550   src    : S1(read);
 6551   INS01  : ISS;
 6552   NEON_FP : S3;
 6553 %}
 6554 
 6555 pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
 6556 %{
 6557   single_instruction;
 6558   dst    : S3(write);
 6559   src    : S1(read);
 6560   INS0   : ISS;
 6561   NEON_FP : S3;
 6562 %}
 6563 
 6564 pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
 6565 %{
 6566   single_instruction;
 6567   dst    : S5(write);
 6568   src1   : S1(read);
 6569   src2   : S1(read);
 6570   INS01  : ISS;
 6571   NEON_FP : S5;
 6572 %}
 6573 
 6574 pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
 6575 %{
 6576   single_instruction;
 6577   dst    : S5(write);
 6578   src1   : S1(read);
 6579   src2   : S1(read);
 6580   INS0   : ISS;
 6581   NEON_FP : S5;
 6582 %}
 6583 
 6584 pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
 6585 %{
 6586   single_instruction;
 6587   dst    : S5(write);
 6588   src1   : S1(read);
 6589   src2   : S1(read);
 6590   INS0   : ISS;
 6591   NEON_FP : S5;
 6592 %}
 6593 
 6594 pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
 6595 %{
 6596   single_instruction;
 6597   dst    : S5(write);
 6598   src1   : S1(read);
 6599   src2   : S1(read);
 6600   INS0   : ISS;
 6601   NEON_FP : S5;
 6602 %}
 6603 
 6604 pipe_class vsqrt_fp128(vecX dst, vecX src)
 6605 %{
 6606   single_instruction;
 6607   dst    : S5(write);
 6608   src    : S1(read);
 6609   INS0   : ISS;
 6610   NEON_FP : S5;
 6611 %}
 6612 
 6613 pipe_class vunop_fp64(vecD dst, vecD src)
 6614 %{
 6615   single_instruction;
 6616   dst    : S5(write);
 6617   src    : S1(read);
 6618   INS01  : ISS;
 6619   NEON_FP : S5;
 6620 %}
 6621 
 6622 pipe_class vunop_fp128(vecX dst, vecX src)
 6623 %{
 6624   single_instruction;
 6625   dst    : S5(write);
 6626   src    : S1(read);
 6627   INS0   : ISS;
 6628   NEON_FP : S5;
 6629 %}
 6630 
 6631 pipe_class vdup_reg_reg64(vecD dst, iRegI src)
 6632 %{
 6633   single_instruction;
 6634   dst    : S3(write);
 6635   src    : S1(read);
 6636   INS01  : ISS;
 6637   NEON_FP : S3;
 6638 %}
 6639 
 6640 pipe_class vdup_reg_reg128(vecX dst, iRegI src)
 6641 %{
 6642   single_instruction;
 6643   dst    : S3(write);
 6644   src    : S1(read);
 6645   INS01  : ISS;
 6646   NEON_FP : S3;
 6647 %}
 6648 
 6649 pipe_class vdup_reg_freg64(vecD dst, vRegF src)
 6650 %{
 6651   single_instruction;
 6652   dst    : S3(write);
 6653   src    : S1(read);
 6654   INS01  : ISS;
 6655   NEON_FP : S3;
 6656 %}
 6657 
 6658 pipe_class vdup_reg_freg128(vecX dst, vRegF src)
 6659 %{
 6660   single_instruction;
 6661   dst    : S3(write);
 6662   src    : S1(read);
 6663   INS01  : ISS;
 6664   NEON_FP : S3;
 6665 %}
 6666 
 6667 pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
 6668 %{
 6669   single_instruction;
 6670   dst    : S3(write);
 6671   src    : S1(read);
 6672   INS01  : ISS;
 6673   NEON_FP : S3;
 6674 %}
 6675 
 6676 pipe_class vmovi_reg_imm64(vecD dst)
 6677 %{
 6678   single_instruction;
 6679   dst    : S3(write);
 6680   INS01  : ISS;
 6681   NEON_FP : S3;
 6682 %}
 6683 
 6684 pipe_class vmovi_reg_imm128(vecX dst)
 6685 %{
 6686   single_instruction;
 6687   dst    : S3(write);
 6688   INS0   : ISS;
 6689   NEON_FP : S3;
 6690 %}
 6691 
 6692 pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
 6693 %{
 6694   single_instruction;
 6695   dst    : S5(write);
 6696   mem    : ISS(read);
 6697   INS01  : ISS;
 6698   NEON_FP : S3;
 6699 %}
 6700 
 6701 pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
 6702 %{
 6703   single_instruction;
 6704   dst    : S5(write);
 6705   mem    : ISS(read);
 6706   INS01  : ISS;
 6707   NEON_FP : S3;
 6708 %}
 6709 
 6710 pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
 6711 %{
 6712   single_instruction;
 6713   mem    : ISS(read);
 6714   src    : S2(read);
 6715   INS01  : ISS;
 6716   NEON_FP : S3;
 6717 %}
 6718 
 6719 pipe_class vstore_reg_mem128(vecD src, vmem16 mem)
 6720 %{
 6721   single_instruction;
 6722   mem    : ISS(read);
 6723   src    : S2(read);
 6724   INS01  : ISS;
 6725   NEON_FP : S3;
 6726 %}
 6727 
 6728 //------- Integer ALU operations --------------------------
 6729 
 6730 // Integer ALU reg-reg operation
 6731 // Operands needed in EX1, result generated in EX2
 6732 // Eg.  ADD     x0, x1, x2
 6733 pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
 6734 %{
 6735   single_instruction;
 6736   dst    : EX2(write);
 6737   src1   : EX1(read);
 6738   src2   : EX1(read);
 6739   INS01  : ISS; // Dual issue as instruction 0 or 1
 6740   ALU    : EX2;
 6741 %}
 6742 
 6743 // Integer ALU reg-reg operation with constant shift
 6744 // Shifted register must be available in LATE_ISS instead of EX1
 6745 // Eg.  ADD     x0, x1, x2, LSL #2
 6746 pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
 6747 %{
 6748   single_instruction;
 6749   dst    : EX2(write);
 6750   src1   : EX1(read);
 6751   src2   : ISS(read);
 6752   INS01  : ISS;
 6753   ALU    : EX2;
 6754 %}
 6755 
 6756 // Integer ALU reg operation with constant shift
 6757 // Eg.  LSL     x0, x1, #shift
 6758 pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
 6759 %{
 6760   single_instruction;
 6761   dst    : EX2(write);
 6762   src1   : ISS(read);
 6763   INS01  : ISS;
 6764   ALU    : EX2;
 6765 %}
 6766 
 6767 // Integer ALU reg-reg operation with variable shift
 6768 // Both operands must be available in LATE_ISS instead of EX1
 6769 // Result is available in EX1 instead of EX2
 6770 // Eg.  LSLV    x0, x1, x2
 6771 pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
 6772 %{
 6773   single_instruction;
 6774   dst    : EX1(write);
 6775   src1   : ISS(read);
 6776   src2   : ISS(read);
 6777   INS01  : ISS;
 6778   ALU    : EX1;
 6779 %}
 6780 
 6781 // Integer ALU reg-reg operation with extract
 6782 // As for _vshift above, but result generated in EX2
 6783 // Eg.  EXTR    x0, x1, x2, #N
 6784 pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
 6785 %{
 6786   single_instruction;
 6787   dst    : EX2(write);
 6788   src1   : ISS(read);
 6789   src2   : ISS(read);
 6790   INS1   : ISS; // Can only dual issue as Instruction 1
 6791   ALU    : EX1;
 6792 %}
 6793 
 6794 // Integer ALU reg operation
 6795 // Eg.  NEG     x0, x1
 6796 pipe_class ialu_reg(iRegI dst, iRegI src)
 6797 %{
 6798   single_instruction;
 6799   dst    : EX2(write);
 6800   src    : EX1(read);
 6801   INS01  : ISS;
 6802   ALU    : EX2;
 6803 %}
 6804 
// Integer ALU reg-immediate operation
 6806 // Eg.  ADD     x0, x1, #N
 6807 pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
 6808 %{
 6809   single_instruction;
 6810   dst    : EX2(write);
 6811   src1   : EX1(read);
 6812   INS01  : ISS;
 6813   ALU    : EX2;
 6814 %}
 6815 
 6816 // Integer ALU immediate operation (no source operands)
 6817 // Eg.  MOV     x0, #N
 6818 pipe_class ialu_imm(iRegI dst)
 6819 %{
 6820   single_instruction;
 6821   dst    : EX1(write);
 6822   INS01  : ISS;
 6823   ALU    : EX1;
 6824 %}
 6825 
 6826 //------- Compare operation -------------------------------
 6827 
 6828 // Compare reg-reg
 6829 // Eg.  CMP     x0, x1
 6830 pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
 6831 %{
 6832   single_instruction;
 6833 //  fixed_latency(16);
 6834   cr     : EX2(write);
 6835   op1    : EX1(read);
 6836   op2    : EX1(read);
 6837   INS01  : ISS;
 6838   ALU    : EX2;
 6839 %}
 6840 
 6841 // Compare reg-reg
 6842 // Eg.  CMP     x0, #N
 6843 pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
 6844 %{
 6845   single_instruction;
 6846 //  fixed_latency(16);
 6847   cr     : EX2(write);
 6848   op1    : EX1(read);
 6849   INS01  : ISS;
 6850   ALU    : EX2;
 6851 %}
 6852 
 6853 //------- Conditional instructions ------------------------
 6854 
 6855 // Conditional no operands
 6856 // Eg.  CSINC   x0, zr, zr, <cond>
 6857 pipe_class icond_none(iRegI dst, rFlagsReg cr)
 6858 %{
 6859   single_instruction;
 6860   cr     : EX1(read);
 6861   dst    : EX2(write);
 6862   INS01  : ISS;
 6863   ALU    : EX2;
 6864 %}
 6865 
 6866 // Conditional 2 operand
 6867 // EG.  CSEL    X0, X1, X2, <cond>
 6868 pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
 6869 %{
 6870   single_instruction;
 6871   cr     : EX1(read);
 6872   src1   : EX1(read);
 6873   src2   : EX1(read);
 6874   dst    : EX2(write);
 6875   INS01  : ISS;
 6876   ALU    : EX2;
 6877 %}
 6878 
// Conditional 1 operand
// EG.  CSNEG   X0, X1, X1, <cond>
 6881 pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
 6882 %{
 6883   single_instruction;
 6884   cr     : EX1(read);
 6885   src    : EX1(read);
 6886   dst    : EX2(write);
 6887   INS01  : ISS;
 6888   ALU    : EX2;
 6889 %}
 6890 
 6891 //------- Multiply pipeline operations --------------------
 6892 
 6893 // Multiply reg-reg
 6894 // Eg.  MUL     w0, w1, w2
 6895 pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
 6896 %{
 6897   single_instruction;
 6898   dst    : WR(write);
 6899   src1   : ISS(read);
 6900   src2   : ISS(read);
 6901   INS01  : ISS;
 6902   MAC    : WR;
 6903 %}
 6904 
 6905 // Multiply accumulate
 6906 // Eg.  MADD    w0, w1, w2, w3
 6907 pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
 6908 %{
 6909   single_instruction;
 6910   dst    : WR(write);
 6911   src1   : ISS(read);
 6912   src2   : ISS(read);
 6913   src3   : ISS(read);
 6914   INS01  : ISS;
 6915   MAC    : WR;
 6916 %}
 6917 
// Long multiply reg-reg
// Eg.  MUL     x0, x1, x2
 6919 pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
 6920 %{
 6921   single_instruction;
 6922   fixed_latency(3); // Maximum latency for 64 bit mul
 6923   dst    : WR(write);
 6924   src1   : ISS(read);
 6925   src2   : ISS(read);
 6926   INS01  : ISS;
 6927   MAC    : WR;
 6928 %}
 6929 
// Long multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
 6932 pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
 6933 %{
 6934   single_instruction;
 6935   fixed_latency(3); // Maximum latency for 64 bit mul
 6936   dst    : WR(write);
 6937   src1   : ISS(read);
 6938   src2   : ISS(read);
 6939   src3   : ISS(read);
 6940   INS01  : ISS;
 6941   MAC    : WR;
 6942 %}
 6943 
 6944 //------- Divide pipeline operations --------------------
 6945 
 6946 // Eg.  SDIV    w0, w1, w2
 6947 pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
 6948 %{
 6949   single_instruction;
 6950   fixed_latency(8); // Maximum latency for 32 bit divide
 6951   dst    : WR(write);
 6952   src1   : ISS(read);
 6953   src2   : ISS(read);
 6954   INS0   : ISS; // Can only dual issue as instruction 0
 6955   DIV    : WR;
 6956 %}
 6957 
 6958 // Eg.  SDIV    x0, x1, x2
 6959 pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
 6960 %{
 6961   single_instruction;
 6962   fixed_latency(16); // Maximum latency for 64 bit divide
 6963   dst    : WR(write);
 6964   src1   : ISS(read);
 6965   src2   : ISS(read);
 6966   INS0   : ISS; // Can only dual issue as instruction 0
 6967   DIV    : WR;
 6968 %}
 6969 
 6970 //------- Load pipeline operations ------------------------
 6971 
 6972 // Load - prefetch
 6973 // Eg.  PFRM    <mem>
 6974 pipe_class iload_prefetch(memory mem)
 6975 %{
 6976   single_instruction;
 6977   mem    : ISS(read);
 6978   INS01  : ISS;
 6979   LDST   : WR;
 6980 %}
 6981 
 6982 // Load - reg, mem
 6983 // Eg.  LDR     x0, <mem>
 6984 pipe_class iload_reg_mem(iRegI dst, memory mem)
 6985 %{
 6986   single_instruction;
 6987   dst    : WR(write);
 6988   mem    : ISS(read);
 6989   INS01  : ISS;
 6990   LDST   : WR;
 6991 %}
 6992 
 6993 // Load - reg, reg
 6994 // Eg.  LDR     x0, [sp, x1]
 6995 pipe_class iload_reg_reg(iRegI dst, iRegI src)
 6996 %{
 6997   single_instruction;
 6998   dst    : WR(write);
 6999   src    : ISS(read);
 7000   INS01  : ISS;
 7001   LDST   : WR;
 7002 %}
 7003 
 7004 //------- Store pipeline operations -----------------------
 7005 
 7006 // Store - zr, mem
 7007 // Eg.  STR     zr, <mem>
 7008 pipe_class istore_mem(memory mem)
 7009 %{
 7010   single_instruction;
 7011   mem    : ISS(read);
 7012   INS01  : ISS;
 7013   LDST   : WR;
 7014 %}
 7015 
 7016 // Store - reg, mem
 7017 // Eg.  STR     x0, <mem>
 7018 pipe_class istore_reg_mem(iRegI src, memory mem)
 7019 %{
 7020   single_instruction;
 7021   mem    : ISS(read);
 7022   src    : EX2(read);
 7023   INS01  : ISS;
 7024   LDST   : WR;
 7025 %}
 7026 
 7027 // Store - reg, reg
 7028 // Eg. STR      x0, [sp, x1]
 7029 pipe_class istore_reg_reg(iRegI dst, iRegI src)
 7030 %{
 7031   single_instruction;
 7032   dst    : ISS(read);
 7033   src    : EX2(read);
 7034   INS01  : ISS;
 7035   LDST   : WR;
 7036 %}
 7037 
//------- Branch pipeline operations ----------------------
 7039 
 7040 // Branch
 7041 pipe_class pipe_branch()
 7042 %{
 7043   single_instruction;
 7044   INS01  : ISS;
 7045   BRANCH : EX1;
 7046 %}
 7047 
 7048 // Conditional branch
 7049 pipe_class pipe_branch_cond(rFlagsReg cr)
 7050 %{
 7051   single_instruction;
 7052   cr     : EX1(read);
 7053   INS01  : ISS;
 7054   BRANCH : EX1;
 7055 %}
 7056 
 7057 // Compare & Branch
 7058 // EG.  CBZ/CBNZ
 7059 pipe_class pipe_cmp_branch(iRegI op1)
 7060 %{
 7061   single_instruction;
 7062   op1    : EX1(read);
 7063   INS01  : ISS;
 7064   BRANCH : EX1;
 7065 %}
 7066 
 7067 //------- Synchronisation operations ----------------------
 7068 
 7069 // Any operation requiring serialization.
 7070 // EG.  DMB/Atomic Ops/Load Acquire/Str Release
 7071 pipe_class pipe_serial()
 7072 %{
 7073   single_instruction;
 7074   force_serialization;
 7075   fixed_latency(16);
 7076   INS01  : ISS(2); // Cannot dual issue with any other instruction
 7077   LDST   : WR;
 7078 %}
 7079 
 7080 // Generic big/slow expanded idiom - also serialized
 7081 pipe_class pipe_slow()
 7082 %{
 7083   instruction_count(10);
 7084   multiple_bundles;
 7085   force_serialization;
 7086   fixed_latency(16);
 7087   INS01  : ISS(2); // Cannot dual issue with any other instruction
 7088   LDST   : WR;
 7089 %}
 7090 
 7091 // Empty pipeline class
 7092 pipe_class pipe_class_empty()
 7093 %{
 7094   single_instruction;
 7095   fixed_latency(0);
 7096 %}
 7097 
 7098 // Default pipeline class.
 7099 pipe_class pipe_class_default()
 7100 %{
 7101   single_instruction;
 7102   fixed_latency(2);
 7103 %}
 7104 
 7105 // Pipeline class for compares.
 7106 pipe_class pipe_class_compare()
 7107 %{
 7108   single_instruction;
 7109   fixed_latency(16);
 7110 %}
 7111 
 7112 // Pipeline class for memory operations.
 7113 pipe_class pipe_class_memory()
 7114 %{
 7115   single_instruction;
 7116   fixed_latency(16);
 7117 %}
 7118 
 7119 // Pipeline class for call.
 7120 pipe_class pipe_class_call()
 7121 %{
 7122   single_instruction;
 7123   fixed_latency(100);
 7124 %}
 7125 
 7126 // Define the class for the Nop node.
 7127 define %{
 7128    MachNop = pipe_class_empty;
 7129 %}
 7130 
 7131 %}
 7132 //----------INSTRUCTIONS-------------------------------------------------------
 7133 //
 7134 // match      -- States which machine-independent subtree may be replaced
 7135 //               by this instruction.
 7136 // ins_cost   -- The estimated cost of this instruction is used by instruction
 7137 //               selection to identify a minimum cost tree of machine
 7138 //               instructions that matches a tree of machine-independent
 7139 //               instructions.
 7140 // format     -- A string providing the disassembly for this instruction.
 7141 //               The value of an instruction's operand may be inserted
 7142 //               by referring to it with a '$' prefix.
 7143 // opcode     -- Three instruction opcodes may be provided.  These are referred
 7144 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
 7146 //               indicate the type of machine instruction, while secondary
 7147 //               and tertiary are often used for prefix options or addressing
 7148 //               modes.
 7149 // ins_encode -- A list of encode classes with parameters. The encode class
 7150 //               name must have been defined in an 'enc_class' specification
 7151 //               in the encode section of the architecture description.
 7152 
 7153 // ============================================================================
 7154 // Memory (Load/Store) Instructions
 7155 
 7156 // Load Instructions
 7157 
 7158 // Load Byte (8 bit signed)
 7159 instruct loadB(iRegINoSp dst, memory1 mem)
 7160 %{
 7161   match(Set dst (LoadB mem));
 7162   predicate(!needs_acquiring_load(n));
 7163 
 7164   ins_cost(4 * INSN_COST);
 7165   format %{ "ldrsbw  $dst, $mem\t# byte" %}
 7166 
 7167   ins_encode(aarch64_enc_ldrsbw(dst, mem));
 7168 
 7169   ins_pipe(iload_reg_mem);
 7170 %}
 7171 
 7172 // Load Byte (8 bit signed) into long
 7173 instruct loadB2L(iRegLNoSp dst, memory1 mem)
 7174 %{
 7175   match(Set dst (ConvI2L (LoadB mem)));
 7176   predicate(!needs_acquiring_load(n->in(1)));
 7177 
 7178   ins_cost(4 * INSN_COST);
 7179   format %{ "ldrsb  $dst, $mem\t# byte" %}
 7180 
 7181   ins_encode(aarch64_enc_ldrsb(dst, mem));
 7182 
 7183   ins_pipe(iload_reg_mem);
 7184 %}
 7185 
 7186 // Load Byte (8 bit unsigned)
 7187 instruct loadUB(iRegINoSp dst, memory1 mem)
 7188 %{
 7189   match(Set dst (LoadUB mem));
 7190   predicate(!needs_acquiring_load(n));
 7191 
 7192   ins_cost(4 * INSN_COST);
 7193   format %{ "ldrbw  $dst, $mem\t# byte" %}
 7194 
 7195   ins_encode(aarch64_enc_ldrb(dst, mem));
 7196 
 7197   ins_pipe(iload_reg_mem);
 7198 %}
 7199 
 7200 // Load Byte (8 bit unsigned) into long
 7201 instruct loadUB2L(iRegLNoSp dst, memory1 mem)
 7202 %{
 7203   match(Set dst (ConvI2L (LoadUB mem)));
 7204   predicate(!needs_acquiring_load(n->in(1)));
 7205 
 7206   ins_cost(4 * INSN_COST);
 7207   format %{ "ldrb  $dst, $mem\t# byte" %}
 7208 
 7209   ins_encode(aarch64_enc_ldrb(dst, mem));
 7210 
 7211   ins_pipe(iload_reg_mem);
 7212 %}
 7213 
 7214 // Load Short (16 bit signed)
 7215 instruct loadS(iRegINoSp dst, memory2 mem)
 7216 %{
 7217   match(Set dst (LoadS mem));
 7218   predicate(!needs_acquiring_load(n));
 7219 
 7220   ins_cost(4 * INSN_COST);
 7221   format %{ "ldrshw  $dst, $mem\t# short" %}
 7222 
 7223   ins_encode(aarch64_enc_ldrshw(dst, mem));
 7224 
 7225   ins_pipe(iload_reg_mem);
 7226 %}
 7227 
 7228 // Load Short (16 bit signed) into long
 7229 instruct loadS2L(iRegLNoSp dst, memory2 mem)
 7230 %{
 7231   match(Set dst (ConvI2L (LoadS mem)));
 7232   predicate(!needs_acquiring_load(n->in(1)));
 7233 
 7234   ins_cost(4 * INSN_COST);
 7235   format %{ "ldrsh  $dst, $mem\t# short" %}
 7236 
 7237   ins_encode(aarch64_enc_ldrsh(dst, mem));
 7238 
 7239   ins_pipe(iload_reg_mem);
 7240 %}
 7241 
 7242 // Load Char (16 bit unsigned)
 7243 instruct loadUS(iRegINoSp dst, memory2 mem)
 7244 %{
 7245   match(Set dst (LoadUS mem));
 7246   predicate(!needs_acquiring_load(n));
 7247 
 7248   ins_cost(4 * INSN_COST);
 7249   format %{ "ldrh  $dst, $mem\t# short" %}
 7250 
 7251   ins_encode(aarch64_enc_ldrh(dst, mem));
 7252 
 7253   ins_pipe(iload_reg_mem);
 7254 %}
 7255 
 7256 // Load Short/Char (16 bit unsigned) into long
 7257 instruct loadUS2L(iRegLNoSp dst, memory2 mem)
 7258 %{
 7259   match(Set dst (ConvI2L (LoadUS mem)));
 7260   predicate(!needs_acquiring_load(n->in(1)));
 7261 
 7262   ins_cost(4 * INSN_COST);
 7263   format %{ "ldrh  $dst, $mem\t# short" %}
 7264 
 7265   ins_encode(aarch64_enc_ldrh(dst, mem));
 7266 
 7267   ins_pipe(iload_reg_mem);
 7268 %}
 7269 
 7270 // Load Integer (32 bit signed)
 7271 instruct loadI(iRegINoSp dst, memory4 mem)
 7272 %{
 7273   match(Set dst (LoadI mem));
 7274   predicate(!needs_acquiring_load(n));
 7275 
 7276   ins_cost(4 * INSN_COST);
 7277   format %{ "ldrw  $dst, $mem\t# int" %}
 7278 
 7279   ins_encode(aarch64_enc_ldrw(dst, mem));
 7280 
 7281   ins_pipe(iload_reg_mem);
 7282 %}
 7283 
 7284 // Load Integer (32 bit signed) into long
 7285 instruct loadI2L(iRegLNoSp dst, memory4 mem)
 7286 %{
 7287   match(Set dst (ConvI2L (LoadI mem)));
 7288   predicate(!needs_acquiring_load(n->in(1)));
 7289 
 7290   ins_cost(4 * INSN_COST);
 7291   format %{ "ldrsw  $dst, $mem\t# int" %}
 7292 
 7293   ins_encode(aarch64_enc_ldrsw(dst, mem));
 7294 
 7295   ins_pipe(iload_reg_mem);
 7296 %}
 7297 
 7298 // Load Integer (32 bit unsigned) into long
instruct loadUI2L(iRegLNoSp dst, memory4 mem, immL_32bits mask)
%{
  // (AndL (ConvI2L (LoadI mem)) 0xFFFFFFFF): ldrw zero-extends to 64 bits,
  // so the explicit mask folds away and a single ldrw suffices.
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  // n->in(1)->in(1) walks AndL -> ConvI2L -> the LoadI node itself.
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7311 
 7312 // Load Long (64 bit signed)
 7313 instruct loadL(iRegLNoSp dst, memory8 mem)
 7314 %{
 7315   match(Set dst (LoadL mem));
 7316   predicate(!needs_acquiring_load(n));
 7317 
 7318   ins_cost(4 * INSN_COST);
 7319   format %{ "ldr  $dst, $mem\t# int" %}
 7320 
 7321   ins_encode(aarch64_enc_ldr(dst, mem));
 7322 
 7323   ins_pipe(iload_reg_mem);
 7324 %}
 7325 
// Load Range (array length)
instruct loadRange(iRegINoSp dst, memory4 mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
instruct loadP(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadP mem));
  // plain load with no GC barrier data; loads that carry barrier
  // data are matched by GC-specific rules instead
  predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Float
instruct loadF(vRegF dst, memory4 mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory8 mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
 7422 
 7423 
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant

instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  // higher cost than loadConP0/loadConP1: an arbitrary pointer may
  // need a multi-instruction materialization sequence
  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
 7479 
 7480 // Load Pointer Constant One
 7481 
 7482 instruct loadConP1(iRegPNoSp dst, immP_1 con)
 7483 %{
 7484   match(Set dst con);
 7485 
 7486   ins_cost(INSN_COST);
 7487   format %{ "mov  $dst, $con\t# NULL ptr" %}
 7488 
 7489   ins_encode(aarch64_enc_mov_p1(dst, con));
 7490 
 7491   ins_pipe(ialu_imm);
 7492 %}
 7493 
// Load Byte Map Base Constant

instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Packed Float Constant
// (immFPacked: a float expressible as an fmov immediate — no
// constant-table load needed)

instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}

// Load Float Constant
// (general case: fetched from the constant table)

instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}

// Load Packed Double Constant
// (immDPacked: a double expressible as an fmov immediate)

instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
 7593 
 7594 // Load Double Constant
 7595 
 7596 instruct loadConD(vRegD dst, immD con) %{
 7597   match(Set dst con);
 7598 
 7599   ins_cost(INSN_COST * 5);
 7600   format %{
 7601     "ldrd $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
 7602   %}
 7603 
 7604   ins_encode %{
 7605     __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
 7606   %}
 7607 
 7608   ins_pipe(fp_load_constant_d);
 7609 %}
 7610 
// Store Instructions

// Store CMS card-mark Immediate
// (used when no intervening StoreStore barrier is required; the
// storestore shown in the format is elided)
instruct storeimmCM0(immI0 zero, memory1 mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST);
  format %{ "storestore (elided)\n\t"
            "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory1 mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "storestore\n\t"
            "dmb ishst"
            "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}

// Store Byte
instruct storeB(iRegIorL2I src, memory1 mem)
%{
  match(Set mem (StoreB mem src));
  // plain (non-releasing) store only; volatile stores use the
  // stlr rules in the volatile section below
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7656 
 7657 
 7658 instruct storeimmB0(immI0 zero, memory1 mem)
 7659 %{
 7660   match(Set mem (StoreB mem zero));
 7661   predicate(!needs_releasing_store(n));
 7662 
 7663   ins_cost(INSN_COST);
 7664   format %{ "strb rscractch2, $mem\t# byte" %}
 7665 
 7666   ins_encode(aarch64_enc_strb0(mem));
 7667 
 7668   ins_pipe(istore_mem);
 7669 %}
 7670 
// Store Char/Short
instruct storeC(iRegIorL2I src, memory2 mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Char/Short immediate zero (stores the zero register)
instruct storeimmC0(immI0 zero, memory2 mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}

// Store Integer

instruct storeI(iRegIorL2I src, memory4 mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Integer immediate zero (stores the zero register)
instruct storeimmI0(immI0 zero, memory4 mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
 7725 
 7726 // Store Long (64 bit signed)
 7727 instruct storeL(iRegL src, memory8 mem)
 7728 %{
 7729   match(Set mem (StoreL mem src));
 7730   predicate(!needs_releasing_store(n));
 7731 
 7732   ins_cost(INSN_COST);
 7733   format %{ "str  $src, $mem\t# int" %}
 7734 
 7735   ins_encode(aarch64_enc_str(src, mem));
 7736 
 7737   ins_pipe(istore_reg_mem);
 7738 %}
 7739 
 7740 // Store Long (64 bit signed)
 7741 instruct storeimmL0(immL0 zero, memory8 mem)
 7742 %{
 7743   match(Set mem (StoreL mem zero));
 7744   predicate(!needs_releasing_store(n));
 7745 
 7746   ins_cost(INSN_COST);
 7747   format %{ "str  zr, $mem\t# int" %}
 7748 
 7749   ins_encode(aarch64_enc_str0(mem));
 7750 
 7751   ins_pipe(istore_mem);
 7752 %}
 7753 
// Store Pointer
instruct storeP(iRegP src, memory8 mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Null Pointer (stores the zero register)
instruct storeimmP0(immP0 zero, memory8 mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Compressed Pointer
instruct storeN(iRegN src, memory4 mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Compressed Null Pointer (stores the zero register)
instruct storeImmN0(immN0 zero, memory4 mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}

// Store Float
instruct storeF(vRegF src, memory4 mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD(vRegD src, memory8 mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory4 mem)
%{
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// TODO
// implement storeImmD0 and storeDImmPacked

// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

instruct prefetchalloc( memory8 mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
 7870 
//  ---------------- volatile loads and stores ----------------
//
// These rules use load-acquire (ldar*) and store-release (stlr*)
// instructions; the operand is restricted to a plain indirect
// address because ldar/stlr take a base register only.

// Load Byte (8 bit signed)
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
 7962 
 7963 // Load Short/Char (16 bit signed) into long
 7964 instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7965 %{
 7966   match(Set dst (ConvI2L (LoadS mem)));
 7967 
 7968   ins_cost(VOLATILE_REF_COST);
 7969   format %{ "ldarh  $dst, $mem\t# short" %}
 7970 
 7971   ins_encode(aarch64_enc_ldarsh(dst, mem));
 7972 
 7973   ins_pipe(pipe_serial);
 7974 %}
 7975 
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// (the zero-extending ldarw absorbs the ConvI2L and the 0xffffffff mask)
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
 8001 
 8002 // Load Long (64 bit signed)
 8003 instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 8004 %{
 8005   match(Set dst (LoadL mem));
 8006 
 8007   ins_cost(VOLATILE_REF_COST);
 8008   format %{ "ldar  $dst, $mem\t# int" %}
 8009 
 8010   ins_encode(aarch64_enc_ldar(dst, mem));
 8011 
 8012   ins_pipe(pipe_serial);
 8013 %}
 8014 
// Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));
  // only when the load carries no GC barrier data (cf. loadP)
  predicate(n->as_Load()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Store Byte
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
 8107 
 8108 // Store Long (64 bit signed)
 8109 instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
 8110 %{
 8111   match(Set mem (StoreL mem src));
 8112 
 8113   ins_cost(VOLATILE_REF_COST);
 8114   format %{ "stlr  $src, $mem\t# int" %}
 8115 
 8116   ins_encode(aarch64_enc_stlr(src, mem));
 8117 
 8118   ins_pipe(pipe_class_memory);
 8119 %}
 8120 
// Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}

//  ---------------- end of volatile loads and stores ----------------

// Cache line write-back; base-register addressing only (no index, no
// displacement), enforced by the asserts in the encoding.
instruct cacheWB(indirect addr)
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWB addr);

  ins_cost(100);
  format %{"cache wb $addr" %}
  ins_encode %{
    assert($addr->index_position() < 0, "should be");
    assert($addr$$disp == 0, "should be");
    __ cache_wb(Address($addr$$base$$Register, 0));
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering barrier emitted before a sequence of cache write-backs
instruct cacheWBPreSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPreSync);

  ins_cost(100);
  format %{"cache wb presync" %}
  ins_encode %{
    __ cache_wbsync(true);
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering barrier emitted after a sequence of cache write-backs
instruct cacheWBPostSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPostSync);

  ins_cost(100);
  format %{"cache wb postsync" %}
  ins_encode %{
    __ cache_wbsync(false);
  %}
  ins_pipe(pipe_slow); // XXX
%}
 8218 
 8219 // ============================================================================
 8220 // BSWAP Instructions
 8221 
 8222 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
 8223   match(Set dst (ReverseBytesI src));
 8224 
 8225   ins_cost(INSN_COST);
 8226   format %{ "revw  $dst, $src" %}
 8227 
 8228   ins_encode %{
 8229     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
 8230   %}
 8231 
 8232   ins_pipe(ialu_reg);
 8233 %}
 8234 
 8235 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
 8236   match(Set dst (ReverseBytesL src));
 8237 
 8238   ins_cost(INSN_COST);
 8239   format %{ "rev  $dst, $src" %}
 8240 
 8241   ins_encode %{
 8242     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
 8243   %}
 8244 
 8245   ins_pipe(ialu_reg);
 8246 %}
 8247 
 8248 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
 8249   match(Set dst (ReverseBytesUS src));
 8250 
 8251   ins_cost(INSN_COST);
 8252   format %{ "rev16w  $dst, $src" %}
 8253 
 8254   ins_encode %{
 8255     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
 8256   %}
 8257 
 8258   ins_pipe(ialu_reg);
 8259 %}
 8260 
 8261 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
 8262   match(Set dst (ReverseBytesS src));
 8263 
 8264   ins_cost(INSN_COST);
 8265   format %{ "rev16w  $dst, $src\n\t"
 8266             "sbfmw $dst, $dst, #0, #15" %}
 8267 
 8268   ins_encode %{
 8269     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
 8270     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
 8271   %}
 8272 
 8273   ins_pipe(ialu_reg);
 8274 %}
 8275 
 8276 // ============================================================================
 8277 // Zero Count Instructions
 8278 
 8279 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
 8280   match(Set dst (CountLeadingZerosI src));
 8281 
 8282   ins_cost(INSN_COST);
 8283   format %{ "clzw  $dst, $src" %}
 8284   ins_encode %{
 8285     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
 8286   %}
 8287 
 8288   ins_pipe(ialu_reg);
 8289 %}
 8290 
 8291 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
 8292   match(Set dst (CountLeadingZerosL src));
 8293 
 8294   ins_cost(INSN_COST);
 8295   format %{ "clz   $dst, $src" %}
 8296   ins_encode %{
 8297     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
 8298   %}
 8299 
 8300   ins_pipe(ialu_reg);
 8301 %}
 8302 
 8303 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
 8304   match(Set dst (CountTrailingZerosI src));
 8305 
 8306   ins_cost(INSN_COST * 2);
 8307   format %{ "rbitw  $dst, $src\n\t"
 8308             "clzw   $dst, $dst" %}
 8309   ins_encode %{
 8310     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
 8311     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
 8312   %}
 8313 
 8314   ins_pipe(ialu_reg);
 8315 %}
 8316 
 8317 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
 8318   match(Set dst (CountTrailingZerosL src));
 8319 
 8320   ins_cost(INSN_COST * 2);
 8321   format %{ "rbit   $dst, $src\n\t"
 8322             "clz    $dst, $dst" %}
 8323   ins_encode %{
 8324     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
 8325     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
 8326   %}
 8327 
 8328   ins_pipe(ialu_reg);
 8329 %}
 8330 
//---------- Population Count Instructions -------------------------------------
//
// Population count is done via the SIMD unit: move the value into a
// vector register, count bits per byte (cnt), then sum the byte
// counts (addv) and move the result back.

instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): this movw writes back into $src to zero its upper
    // 32 bits, but $src is not declared USE_DEF in effect() — confirm
    // this write-back is accounted for by the register allocator.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Popcount of an int loaded from memory: load straight into the
// vector register (ldrs), skipping the GPR round-trip.
instruct popCountI_mem(iRegINoSp dst, memory4 mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Popcount of a long loaded from memory: load straight into the
// vector register (ldrd), skipping the GPR round-trip.
instruct popCountL_mem(iRegINoSp dst, memory8 mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
 8420 
 8421 // ============================================================================
 8422 // MemBar Instruction
 8423 
 8424 instruct load_fence() %{
 8425   match(LoadFence);
 8426   ins_cost(VOLATILE_REF_COST);
 8427 
 8428   format %{ "load_fence" %}
 8429 
 8430   ins_encode %{
 8431     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
 8432   %}
 8433   ins_pipe(pipe_serial);
 8434 %}
 8435 
 8436 instruct unnecessary_membar_acquire() %{
 8437   predicate(unnecessary_acquire(n));
 8438   match(MemBarAcquire);
 8439   ins_cost(0);
 8440 
 8441   format %{ "membar_acquire (elided)" %}
 8442 
 8443   ins_encode %{
 8444     __ block_comment("membar_acquire (elided)");
 8445   %}
 8446 
 8447   ins_pipe(pipe_class_empty);
 8448 %}
 8449 
 8450 instruct membar_acquire() %{
 8451   match(MemBarAcquire);
 8452   ins_cost(VOLATILE_REF_COST);
 8453 
 8454   format %{ "membar_acquire\n\t"
 8455             "dmb ish" %}
 8456 
 8457   ins_encode %{
 8458     __ block_comment("membar_acquire");
 8459     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
 8460   %}
 8461 
 8462   ins_pipe(pipe_serial);
 8463 %}
 8464 
 8465 
 8466 instruct membar_acquire_lock() %{
 8467   match(MemBarAcquireLock);
 8468   ins_cost(VOLATILE_REF_COST);
 8469 
 8470   format %{ "membar_acquire_lock (elided)" %}
 8471 
 8472   ins_encode %{
 8473     __ block_comment("membar_acquire_lock (elided)");
 8474   %}
 8475 
 8476   ins_pipe(pipe_serial);
 8477 %}
 8478 
 8479 instruct store_fence() %{
 8480   match(StoreFence);
 8481   ins_cost(VOLATILE_REF_COST);
 8482 
 8483   format %{ "store_fence" %}
 8484 
 8485   ins_encode %{
 8486     __ membar(Assembler::LoadStore|Assembler::StoreStore);
 8487   %}
 8488   ins_pipe(pipe_serial);
 8489 %}
 8490 
 8491 instruct unnecessary_membar_release() %{
 8492   predicate(unnecessary_release(n));
 8493   match(MemBarRelease);
 8494   ins_cost(0);
 8495 
 8496   format %{ "membar_release (elided)" %}
 8497 
 8498   ins_encode %{
 8499     __ block_comment("membar_release (elided)");
 8500   %}
 8501   ins_pipe(pipe_serial);
 8502 %}
 8503 
 8504 instruct membar_release() %{
 8505   match(MemBarRelease);
 8506   ins_cost(VOLATILE_REF_COST);
 8507 
 8508   format %{ "membar_release\n\t"
 8509             "dmb ish" %}
 8510 
 8511   ins_encode %{
 8512     __ block_comment("membar_release");
 8513     __ membar(Assembler::LoadStore|Assembler::StoreStore);
 8514   %}
 8515   ins_pipe(pipe_serial);
 8516 %}
 8517 
 8518 instruct membar_storestore() %{
 8519   match(MemBarStoreStore);
 8520   ins_cost(VOLATILE_REF_COST);
 8521 
 8522   format %{ "MEMBAR-store-store" %}
 8523 
 8524   ins_encode %{
 8525     __ membar(Assembler::StoreStore);
 8526   %}
 8527   ins_pipe(pipe_serial);
 8528 %}
 8529 
 8530 instruct membar_release_lock() %{
 8531   match(MemBarReleaseLock);
 8532   ins_cost(VOLATILE_REF_COST);
 8533 
 8534   format %{ "membar_release_lock (elided)" %}
 8535 
 8536   ins_encode %{
 8537     __ block_comment("membar_release_lock (elided)");
 8538   %}
 8539 
 8540   ins_pipe(pipe_serial);
 8541 %}
 8542 
 8543 instruct unnecessary_membar_volatile() %{
 8544   predicate(unnecessary_volatile(n));
 8545   match(MemBarVolatile);
 8546   ins_cost(0);
 8547 
 8548   format %{ "membar_volatile (elided)" %}
 8549 
 8550   ins_encode %{
 8551     __ block_comment("membar_volatile (elided)");
 8552   %}
 8553 
 8554   ins_pipe(pipe_serial);
 8555 %}
 8556 
 8557 instruct membar_volatile() %{
 8558   match(MemBarVolatile);
 8559   ins_cost(VOLATILE_REF_COST*100);
 8560 
 8561   format %{ "membar_volatile\n\t"
 8562              "dmb ish"%}
 8563 
 8564   ins_encode %{
 8565     __ block_comment("membar_volatile");
 8566     __ membar(Assembler::StoreLoad);
 8567   %}
 8568 
 8569   ins_pipe(pipe_serial);
 8570 %}
 8571 
 8572 // ============================================================================
 8573 // Cast/Convert Instructions
 8574 
 8575 instruct castX2P(iRegPNoSp dst, iRegL src) %{
 8576   match(Set dst (CastX2P src));
 8577 
 8578   ins_cost(INSN_COST);
 8579   format %{ "mov $dst, $src\t# long -> ptr" %}
 8580 
 8581   ins_encode %{
 8582     if ($dst$$reg != $src$$reg) {
 8583       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 8584     }
 8585   %}
 8586 
 8587   ins_pipe(ialu_reg);
 8588 %}
 8589 
 8590 instruct castP2X(iRegLNoSp dst, iRegP src) %{
 8591   match(Set dst (CastP2X src));
 8592 
 8593   ins_cost(INSN_COST);
 8594   format %{ "mov $dst, $src\t# ptr -> long" %}
 8595 
 8596   ins_encode %{
 8597     if ($dst$$reg != $src$$reg) {
 8598       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 8599     }
 8600   %}
 8601 
 8602   ins_pipe(ialu_reg);
 8603 %}
 8604 
 8605 // Convert oop into int for vectors alignment masking
 8606 instruct convP2I(iRegINoSp dst, iRegP src) %{
 8607   match(Set dst (ConvL2I (CastP2X src)));
 8608 
 8609   ins_cost(INSN_COST);
 8610   format %{ "movw $dst, $src\t# ptr -> int" %}
 8611   ins_encode %{
 8612     __ movw($dst$$Register, $src$$Register);
 8613   %}
 8614 
 8615   ins_pipe(ialu_reg);
 8616 %}
 8617 
 8618 // Convert compressed oop into int for vectors alignment masking
 8619 // in case of 32bit oops (heap < 4Gb).
 8620 instruct convN2I(iRegINoSp dst, iRegN src)
 8621 %{
 8622   predicate(CompressedOops::shift() == 0);
 8623   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
 8624 
 8625   ins_cost(INSN_COST);
 8626   format %{ "mov dst, $src\t# compressed ptr -> int" %}
 8627   ins_encode %{
 8628     __ movw($dst$$Register, $src$$Register);
 8629   %}
 8630 
 8631   ins_pipe(ialu_reg);
 8632 %}
 8633 
 8634 
// Convert oop pointer into compressed form
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  // General case: the oop may be null, so the slower null-tolerant
  // encoding is used and the condition flags are clobbered.
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Fast path when the oop is statically known non-null; no KILL cr is
// declared, so this encoding must leave the flags untouched.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop; general (possibly-null) case.
// NOTE(review): cr appears as a formal here but no effect(KILL cr) is
// declared -- confirm decode_heap_oop really preserves the flags.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop statically known to be non-null (or a constant).
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
 8688 
// n.b. AArch64 implementations of encode_klass_not_null and
// decode_klass_not_null do not modify the flags register so, unlike
// Intel, we don't kill CR as a side effect here

// Compress a klass pointer (always non-null).
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Decompress a narrow klass pointer (always non-null).
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    // The single-register overload handles the in-place (dst == src) case.
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
 8726 
// Type-system-only casts: size(0) means no machine code is emitted;
// these exist solely to carry refined type information for the compiler.
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
 8757 
 8758 // ============================================================================
 8759 // Atomic operation instructions
 8760 //
 8761 // Intel and SPARC both implement Ideal Node LoadPLocked and
 8762 // Store{PIL}Conditional instructions using a normal load for the
 8763 // LoadPLocked and a CAS for the Store{PIL}Conditional.
 8764 //
 8765 // The ideal code appears only to use LoadPLocked/StorePLocked as a
 8766 // pair to lock object allocations from Eden space when not using
 8767 // TLABs.
 8768 //
 8769 // There does not appear to be a Load{IL}Locked Ideal Node and the
 8770 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
 8771 // and to use StoreIConditional only for 32-bit and StoreLConditional
 8772 // only for 64-bit.
 8773 //
 8774 // We implement LoadPLocked and StorePLocked instructions using,
 8775 // respectively the AArch64 hw load-exclusive and store-conditional
 8776 // instructions. Whereas we must implement each of
 8777 // Store{IL}Conditional using a CAS which employs a pair of
 8778 // instructions comprising a load-exclusive followed by a
 8779 // store-conditional.
 8780 
 8781 
 8782 // Locked-load (linked load) of the current heap-top
 8783 // used when updating the eden heap top
 8784 // implemented using ldaxr on AArch64
 8785 
 8786 instruct loadPLocked(iRegPNoSp dst, indirect mem)
 8787 %{
 8788   match(Set dst (LoadPLocked mem));
 8789 
 8790   ins_cost(VOLATILE_REF_COST);
 8791 
 8792   format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}
 8793 
 8794   ins_encode(aarch64_enc_ldaxr(dst, mem));
 8795 
 8796   ins_pipe(pipe_serial);
 8797 %}
 8798 
 8799 // Conditional-store of the updated heap-top.
 8800 // Used during allocation of the shared heap.
 8801 // Sets flag (EQ) on success.
 8802 // implemented using stlxr on AArch64.
 8803 
 8804 instruct storePConditional(memory8 heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
 8805 %{
 8806   match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));
 8807 
 8808   ins_cost(VOLATILE_REF_COST);
 8809 
 8810  // TODO
 8811  // do we need to do a store-conditional release or can we just use a
 8812  // plain store-conditional?
 8813 
 8814   format %{
 8815     "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
 8816     "cmpw rscratch1, zr\t# EQ on successful write"
 8817   %}
 8818 
 8819   ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));
 8820 
 8821   ins_pipe(pipe_serial);
 8822 %}
 8823 
 8824 
 8825 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
 8826 // when attempting to rebias a lock towards the current thread.  We
 8827 // must use the acquire form of cmpxchg in order to guarantee acquire
 8828 // semantics in this case.
 8829 instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
 8830 %{
 8831   match(Set cr (StoreLConditional mem (Binary oldval newval)));
 8832 
 8833   ins_cost(VOLATILE_REF_COST);
 8834 
 8835   format %{
 8836     "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
 8837     "cmpw rscratch1, zr\t# EQ on successful write"
 8838   %}
 8839 
 8840   ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));
 8841 
 8842   ins_pipe(pipe_slow);
 8843 %}
 8844 
 8845 // storeIConditional also has acquire semantics, for no better reason
 8846 // than matching storeLConditional.  At the time of writing this
 8847 // comment storeIConditional was not used anywhere by AArch64.
 8848 instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
 8849 %{
 8850   match(Set cr (StoreIConditional mem (Binary oldval newval)));
 8851 
 8852   ins_cost(VOLATILE_REF_COST);
 8853 
 8854   format %{
 8855     "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
 8856     "cmpw rscratch1, zr\t# EQ on successful write"
 8857   %}
 8858 
 8859   ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));
 8860 
 8861   ins_pipe(pipe_slow);
 8862 %}
 8863 
// standard CompareAndSwapX when we are using barriers
// these have higher priority than the rules selected by a predicate
//
// Each rule emits a plain (non-acquiring) cmpxchg followed by a cset
// that materializes the success flag into the result register; the
// condition flags are clobbered, hence KILL cr on every variant.

// XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
// can't match them

// byte CAS; operands are handled at int width in registers.
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// short CAS.
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// int CAS.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// long CAS.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// pointer CAS; restricted to barrier_data() == 0 so GCs that attach
// barrier data (e.g. ZGC) can match their own rules instead.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  predicate(n->as_LoadStore()->barrier_data() == 0);
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// narrow-oop CAS (32-bit compare-and-swap of the compressed form).
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8978 
// alternative CompareAndSwapX when we are eliding barriers
//
// These acquiring variants are selected (via needs_acquiring_load_exclusive)
// when the CAS must itself provide acquire semantics because the usual
// trailing barrier has been elided; note the lower ins_cost, which makes
// them preferred over the plain forms whenever the predicate holds.

instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Pointer variant additionally requires barrier_data() == 0, matching
// the restriction on the non-acquiring compareAndSwapP rule.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 9094 
 9095 
 9096 // ---------------------------------------------------------------------
 9097 
 9098 
 9099 // BEGIN This section of the file is automatically generated. Do not edit --------------
 9100 
 9101 // Sundry CAS operations.  Note that release is always true,
 9102 // regardless of the memory ordering of the CAS.  This is because we
 9103 // need the volatile case to be sequentially consistent but there is
 9104 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
 9105 // can't check the type of memory ordering here, so we always emit a
 9106 // STLXR.
 9107 
 9108 // This section is generated from aarch64_ad_cas.m4
 9109 
 9110 
 9111 
// NOTE(review): this span lies inside the section marked as generated
// from aarch64_ad_cas.m4 -- make any substantive changes in the m4
// source, not here.  CompareAndExchange returns the value previously
// found in memory (TEMP_DEF res), unlike CompareAndSwap which returns
// a success flag.

// byte exchange; sxtbw sign-extends the raw byte read back from memory.
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// short exchange; sxthw sign-extends the halfword result.
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Pointer variant restricted to barrier_data() == 0 (GC-specific rules
// handle the other cases).
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 9204 
// NOTE(review): generated section (aarch64_ad_cas.m4) -- edit the m4
// source for substantive changes.  These acquiring exchange variants
// mirror the plain ones above but pass acquire=true and carry a lower
// ins_cost, so they win when needs_acquiring_load_exclusive(n) holds.
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}


instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}


instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 9304 
// NOTE(review): generated section (aarch64_ad_cas.m4) -- edit the m4
// source for substantive changes.  Weak CAS may fail spuriously, so
// callers loop; the result register receives the EQ flag via csetw
// rather than the exchanged value (noreg passed as the cmpxchg result).
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9372 
 9373 instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
 9374   match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
 9375   ins_cost(2 * VOLATILE_REF_COST);
 9376   effect(KILL cr);
 9377   format %{
 9378     "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
 9379     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 9380   %}
 9381   ins_encode %{
 9382     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 9383                Assembler::word, /*acquire*/ false, /*release*/ true,
 9384                /*weak*/ true, noreg);
 9385     __ csetw($res$$Register, Assembler::EQ);
 9386   %}
 9387   ins_pipe(pipe_slow);
 9388 %}
 9389 
 9390 instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
 9391   predicate(n->as_LoadStore()->barrier_data() == 0);
 9392   match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
 9393   ins_cost(2 * VOLATILE_REF_COST);
 9394   effect(KILL cr);
 9395   format %{
 9396     "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
 9397     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 9398   %}
 9399   ins_encode %{
 9400     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 9401                Assembler::xword, /*acquire*/ false, /*release*/ true,
 9402                /*weak*/ true, noreg);
 9403     __ csetw($res$$Register, Assembler::EQ);
 9404   %}
 9405   ins_pipe(pipe_slow);
 9406 %}
 9407 
// Acquiring flavours of the weak CAS rules above: same shape, but the
// exclusive load carries acquire semantics (/*acquire*/ true), selected
// by needs_acquiring_load_exclusive(n).  Note the lower ins_cost so
// these win over the relaxed rules when the predicate holds.

// Weak CAS of a byte, acquire.
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak CAS of a short, acquire.
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak CAS of an int, acquire.
instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak CAS of a long, acquire.
instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak CAS of a narrow oop, acquire.
instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9497 
 9498 instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
 9499   match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
 9500   predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
 9501   ins_cost(VOLATILE_REF_COST);
 9502   effect(KILL cr);
 9503   format %{
 9504     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
 9505     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 9506   %}
 9507   ins_encode %{
 9508     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 9509                Assembler::xword, /*acquire*/ true, /*release*/ true,
 9510                /*weak*/ true, noreg);
 9511     __ csetw($res$$Register, Assembler::EQ);
 9512   %}
 9513   ins_pipe(pipe_slow);
 9514 %}
 9515 
 9516 // END This section of the file is automatically generated. Do not edit --------------
 9517 // ---------------------------------------------------------------------
 9518 
// Atomic exchange (GetAndSet*) rules, relaxed form: swap $newv into
// [$mem] and return the previous value in $prev.

// Atomic exchange of an int (word).
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange of a long (xword).
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange of a narrow oop (word-sized).
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange of a full-width pointer; only when the node has no
// GC barrier data attached.
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9559 
// Acquiring flavours of the exchange rules: selected by
// needs_acquiring_load_exclusive(n) and emitted via the atomic_xchgal*
// (acquire+release) MacroAssembler helpers.

// Atomic exchange of an int, acquire.
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange of a long, acquire.
instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange of a narrow oop, acquire.
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange of a pointer, acquire; no GC barrier data.
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9603 
 9604 
// Atomic fetch-and-add (GetAndAdd{L,I}) rules, relaxed form.  Four
// variants per width: register or immediate increment, each with a
// "_no_res" form (matched when result_not_used()) that passes noreg so
// the old value is not materialized.  The +1 cost on the result-producing
// forms makes the no-result forms win when they apply.

// Long fetch-and-add, register increment.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Long fetch-and-add, register increment, result unused.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Long fetch-and-add, immediate increment.
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Long fetch-and-add, immediate increment, result unused.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int fetch-and-add, register increment.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int fetch-and-add, register increment, result unused.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int fetch-and-add, immediate increment.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int fetch-and-add, immediate increment, result unused.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9688 
// Acquiring flavours of the fetch-and-add rules: same four variants per
// width, selected by needs_acquiring_load_exclusive(n) and emitted via
// the atomic_addal* (acquire+release) helpers.

// Long fetch-and-add, register increment, acquire.
instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Long fetch-and-add, register increment, result unused, acquire.
instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Long fetch-and-add, immediate increment, acquire.
instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Long fetch-and-add, immediate increment, result unused, acquire.
instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int fetch-and-add, register increment, acquire.
instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int fetch-and-add, register increment, result unused, acquire.
instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int fetch-and-add, immediate increment, acquire.
instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int fetch-and-add, immediate increment, result unused, acquire.
instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9776 
 9777 // Manifest a CmpL result in an integer register.
 9778 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
 9779 instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
 9780 %{
 9781   match(Set dst (CmpL3 src1 src2));
 9782   effect(KILL flags);
 9783 
 9784   ins_cost(INSN_COST * 6);
 9785   format %{
 9786       "cmp $src1, $src2"
 9787       "csetw $dst, ne"
 9788       "cnegw $dst, lt"
 9789   %}
 9790   // format %{ "CmpL3 $dst, $src1, $src2" %}
 9791   ins_encode %{
 9792     __ cmp($src1$$Register, $src2$$Register);
 9793     __ csetw($dst$$Register, Assembler::NE);
 9794     __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
 9795   %}
 9796 
 9797   ins_pipe(pipe_class_default);
 9798 %}
 9799 
 9800 instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
 9801 %{
 9802   match(Set dst (CmpL3 src1 src2));
 9803   effect(KILL flags);
 9804 
 9805   ins_cost(INSN_COST * 6);
 9806   format %{
 9807       "cmp $src1, $src2"
 9808       "csetw $dst, ne"
 9809       "cnegw $dst, lt"
 9810   %}
 9811   ins_encode %{
 9812     int32_t con = (int32_t)$src2$$constant;
 9813      if (con < 0) {
 9814       __ adds(zr, $src1$$Register, -con);
 9815     } else {
 9816       __ subs(zr, $src1$$Register, con);
 9817     }
 9818     __ csetw($dst$$Register, Assembler::NE);
 9819     __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
 9820   %}
 9821 
 9822   ins_pipe(pipe_class_default);
 9823 %}
 9824 
 9825 // ============================================================================
 9826 // Conditional Move Instructions
 9827 
// n.b. we have identical rules for both a signed compare op (cmpOp)
// and an unsigned compare op (cmpOpU). it would be nice if we could
// define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
 9837 
// Conditional move of an int, signed compare.  cselw selects $src2 when
// $cmp holds, otherwise $src1 (CSEL Rd, Rn, Rm, cond).
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Conditional move of an int, unsigned compare.
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// n.b. this is selected in preference to the rule above because it
// avoids loading constant 0 into a source register

// TODO
// we ought only to be able to cull one of these variants as the ideal
// transforms ought always to order the zero consistently (to left/right?)

// Conditional move with zero as the false operand, signed compare.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Conditional move with zero as the false operand, unsigned compare.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Conditional move with zero as the true operand, signed compare.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Conditional move with zero as the true operand, unsigned compare.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9942 
 9943 // special case for creating a boolean 0 or 1
 9944 
 9945 // n.b. this is selected in preference to the rule above because it
 9946 // avoids loading constants 0 and 1 into a source register
 9947 
// Boolean materialization, signed compare: csincw zr, zr gives
// dst = $cmp ? 0 : 1, i.e. cset with the negated condition.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// Boolean materialization, unsigned compare.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
 9985 
// Conditional move of a long, signed compare (64-bit csel).
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Conditional move of a long, unsigned compare.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// Conditional move of a long with zero as the true operand, signed.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Conditional move of a long with zero as the true operand, unsigned.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Conditional move of a long with zero as the false operand, signed.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Conditional move of a long with zero as the false operand, unsigned.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10083 
10084 instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
10085   match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));
10086 
10087   ins_cost(INSN_COST * 2);
10088   format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}
10089 
10090   ins_encode %{
10091     __ csel(as_Register($dst$$reg),
10092             as_Register($src2$$reg),
10093             as_Register($src1$$reg),
10094             (Assembler::Condition)$cmp$$cmpcode);
10095   %}
10096 
10097   ins_pipe(icond_reg_reg);
10098 %}
10099 
10100 instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
10101   match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));
10102 
10103   ins_cost(INSN_COST * 2);
10104   format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}
10105 
10106   ins_encode %{
10107     __ csel(as_Register($dst$$reg),
10108             as_Register($src2$$reg),
10109             as_Register($src1$$reg),
10110             (Assembler::Condition)$cmp$$cmpcode);
10111   %}
10112 
10113   ins_pipe(icond_reg_reg);
10114 %}
10115 
10116 // special cases where one arg is zero
10117 
10118 instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
10119   match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));
10120 
10121   ins_cost(INSN_COST * 2);
10122   format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}
10123 
10124   ins_encode %{
10125     __ csel(as_Register($dst$$reg),
10126             zr,
10127             as_Register($src$$reg),
10128             (Assembler::Condition)$cmp$$cmpcode);
10129   %}
10130 
10131   ins_pipe(icond_reg);
10132 %}
10133 
10134 instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
10135   match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));
10136 
10137   ins_cost(INSN_COST * 2);
10138   format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}
10139 
10140   ins_encode %{
10141     __ csel(as_Register($dst$$reg),
10142             zr,
10143             as_Register($src$$reg),
10144             (Assembler::Condition)$cmp$$cmpcode);
10145   %}
10146 
10147   ins_pipe(icond_reg);
10148 %}
10149 
// Conditional move of pointer, zero in the first arm, signed compare:
// dst = (cmp holds) ? src : NULL.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10165 
// Conditional move of pointer, zero in the first arm, unsigned compare:
// dst = (cmp holds) ? src : NULL.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10181 
// Conditional move of compressed pointer (narrow oop), signed compare:
// dst = (cmp holds) ? src2 : src1, using the 32-bit CSELW form.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10197 
// Conditional move of compressed pointer (narrow oop), unsigned compare:
// dst = (cmp holds) ? src2 : src1, using the 32-bit CSELW form.
// Fix: format comment previously said "signed" although this is the
// cmpOpU/rFlagsRegU (unsigned) variant — brought in line with cmovUP_reg_reg.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10213 
10214 // special cases where one arg is zero
10215 
// Conditional move of compressed pointer with a zero arm, signed compare:
// dst = (cmp holds) ? 0 : src.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10231 
// Conditional move of compressed pointer with a zero arm, unsigned compare:
// dst = (cmp holds) ? 0 : src.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10247 
// Conditional move of compressed pointer, zero in the first arm, signed compare:
// dst = (cmp holds) ? src : 0.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10263 
// Conditional move of compressed pointer, zero in the first arm, unsigned compare:
// dst = (cmp holds) ? src : 0.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10279 
// Conditional move of float, signed compare:
// dst = (cmp holds) ? src2 : src1, via single-precision FCSEL.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // fcsels selects src2 when the condition holds, src1 otherwise.
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10297 
// Conditional move of float, unsigned compare:
// dst = (cmp holds) ? src2 : src1, via single-precision FCSEL.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10315 
// Conditional move of double, signed compare:
// dst = (cmp holds) ? src2 : src1, via double-precision FCSEL.
// Fix: format comment previously said "cmove float" although this is the
// vRegD/fcseld (double) pattern.
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10333 
// Conditional move of double, unsigned compare:
// dst = (cmp holds) ? src2 : src1, via double-precision FCSEL.
// Fix: format comment previously said "cmove float" although this is the
// vRegD/fcseld (double) pattern.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10351 
10352 // ============================================================================
10353 // Arithmetic Instructions
10354 //
10355 
10356 // Integer Addition
10357 
10358 // TODO
10359 // these currently employ operations which do not set CR and hence are
10360 // not flagged as killing CR but we would like to isolate the cases
10361 // where we want to set flags from those where we don't. need to work
10362 // out how to do that.
10363 
// Integer (32-bit) register-register addition: dst = src1 + src2 (ADDW).
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10378 
// Integer (32-bit) register-immediate addition: dst = src1 + src2.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10392 
// Integer addition of a truncated long and an immediate:
// dst = (int)src1 + src2. The 32-bit ADDW ignores the upper bits of src1,
// so the ConvL2I needs no separate instruction.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10406 
10407 // Pointer Addition
// Pointer plus long offset: dst = src1 + src2 (64-bit ADD).
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10422 
// Pointer plus sign-extended int offset: dst = src1 + sxtw(src2).
// Folds the ConvI2L into the ADD's sxtw extend, saving an instruction.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
10437 
// Pointer plus shifted long index: dst = src1 + (src2 << scale).
// Folds the LShiftL into the address-generation LSL of an LEA.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10452 
// Pointer plus sign-extended, shifted int index:
// dst = src1 + (sxtw(src2) << scale). Folds both the ConvI2L and the
// shift into the sxtw-scaled addressing mode of an LEA.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10467 
// Left shift of a sign-extended int: dst = (long)src << scale, emitted as
// a single SBFIZ (sign-extend + insert at the shift position).
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    // SBFIZ lsb = shift amount; width = min(32, 64 - shift) so at most the
    // 32 significant bits of the int source are inserted.
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN2(32, (int)((-$scale$$constant) & 63)));
  %}

  ins_pipe(ialu_reg_shift);
%}
10482 
10483 // Pointer Immediate Addition
10484 // n.b. this needs to be more expensive than using an indirect memory
10485 // operand
// Pointer plus immediate: dst = src1 + src2.
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10499 
10500 // Long Addition
// Long (64-bit) register-register addition: dst = src1 + src2.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10516 
// Long Immediate Addition. No constant pool entries required.
// Long register-immediate addition: dst = src1 + src2.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10531 
10532 // Integer Subtraction
// Integer (32-bit) register-register subtraction: dst = src1 - src2 (SUBW).
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10547 
10548 // Immediate Subtraction
// Integer register-immediate subtraction: dst = src1 - src2.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10562 
10563 // Long Subtraction
// Long (64-bit) register-register subtraction: dst = src1 - src2.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10579 
// Long Immediate Subtraction. No constant pool entries required.
// Long register-immediate subtraction: dst = src1 - src2.
// Fix: format string was "sub$dst, ..." — missing the separator between the
// mnemonic and the destination operand in -XX:+PrintOptoAssembly output.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10594 
10595 // Integer Negation (special case for sub)
10596 
// Integer negation (0 - src folded into NEGW): dst = -src.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10610 
10611 // Long Negation
10612 
// Long negation (0 - src folded into NEG): dst = -src.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10626 
10627 // Integer Multiply
10628 
// Integer (32-bit) multiply: dst = src1 * src2 (MULW).
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10643 
// Signed widening multiply: dst = (long)src1 * (long)src2, as one SMULL
// instead of two sign-extends plus a 64-bit multiply.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10658 
10659 // Long Multiply
10660 
// Long (64-bit) multiply: dst = src1 * src2.
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10675 
// High half of a signed 64x64->128-bit multiply (SMULH):
// dst = (src1 * src2) >> 64.
// Fix: format string had a stray ", " before the tab separator.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10691 
10692 // Combined Integer Multiply & Add/Sub
10693 
// Fused integer multiply-add: dst = src3 + src1 * src2 (MADDW).
// Fix: format said "madd" although the emitted instruction is the 32-bit
// maddw — made consistent with the addw/subw/mulw formats elsewhere.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10709 
// Fused integer multiply-subtract: dst = src3 - src1 * src2 (MSUBW).
// Fix: format said "msub" although the emitted instruction is the 32-bit
// msubw — made consistent with the addw/subw/mulw formats elsewhere.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10725 
10726 // Combined Integer Multiply & Neg
10727 
// Integer multiply-negate: dst = -(src1 * src2) (MNEGW).
// Both commuted forms of the negation are matched.
// Fix: format said "mneg" although the emitted instruction is the 32-bit
// mnegw — made consistent with the addw/subw/mulw formats elsewhere.
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));
  match(Set dst (MulI src1 (SubI zero src2)));

  ins_cost(INSN_COST * 3);
  format %{ "mnegw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10743 
10744 // Combined Long Multiply & Add/Sub
10745 
// Fused long multiply-add: dst = src3 + src1 * src2 (MADD).
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10761 
// Fused long multiply-subtract: dst = src3 - src1 * src2 (MSUB).
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10777 
10778 // Combined Long Multiply & Neg
10779 
// Long multiply-negate: dst = -(src1 * src2) (MNEG).
// Both commuted forms of the negation are matched.
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));
  match(Set dst (MulL src1 (SubL zero src2)));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10795 
10796 // Combine Integer Signed Multiply & Add/Sub/Neg Long
10797 
// Signed widening multiply-add: dst = src3 + (long)src1 * (long)src2 (SMADDL).
instruct smaddL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (AddL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smaddl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smaddl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10813 
// Signed widening multiply-subtract: dst = src3 - (long)src1 * (long)src2 (SMSUBL).
instruct smsubL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (SubL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smsubl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smsubl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10829 
// Signed widening multiply-negate: dst = -((long)src1 * (long)src2) (SMNEGL).
// Both commuted forms of the negation are matched.
instruct smnegL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero (ConvI2L src1)) (ConvI2L src2)));
  match(Set dst (MulL (ConvI2L src1) (SubL zero (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smnegl  $dst, $src1, $src2" %}

  ins_encode %{
    __ smnegl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10845 
10846 // Combined Multiply-Add Shorts into Integer (dst = src1 * src2 + src3 * src4)
10847 
// Two multiplies folded into multiply + multiply-add via rscratch1:
// dst = src1 * src2 + src3 * src4 (MulAddS2I intrinsic support).
instruct muladdS2I(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3, iRegIorL2I src4) %{
  match(Set dst (MulAddS2I (Binary src1 src2) (Binary src3 src4)));

  ins_cost(INSN_COST * 5);
  format %{ "mulw  rscratch1, $src1, $src2\n\t"
            "maddw $dst, $src3, $src4, rscratch1" %}

  ins_encode %{
    __ mulw(rscratch1, as_Register($src1$$reg), as_Register($src2$$reg));
    __ maddw(as_Register($dst$$reg), as_Register($src3$$reg), as_Register($src4$$reg), rscratch1); %}

  ins_pipe(imac_reg_reg);
%}
10861 
10862 // Integer Divide
10863 
// Integer (32-bit) signed division: dst = src1 / src2 (SDIVW).
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10873 
10874 // Long Divide
10875 
// Long (64-bit) signed division: dst = src1 / src2 (SDIV).
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10885 
10886 // Integer Remainder
10887 
// Integer remainder: dst = src1 % src2, emitted as sdivw + msubw
// (no hardware remainder instruction on AArch64).
// Fix: format string was garbled — "msubw($dst, ..." had a stray "(" and
// no closing parenthesis; now matches real assembly syntax.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10898 
10899 // Long Remainder
10900 
// Long remainder: dst = src1 % src2, emitted as sdiv + msub
// (no hardware remainder instruction on AArch64).
// Fix: format string was garbled — "msub($dst, ..." had a stray "(" and no
// closing parenthesis, and the line break lacked the "\t" used by modI.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10911 
10912 // Integer Shifts
10913 
10914 // Shift Left Register
// Integer shift left by register: dst = src1 << (src2 & 0x1f) (LSLVW;
// the hardware masks the shift amount to the register width).
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10929 
10930 // Shift Left Immediate
// Integer shift left by immediate: dst = src1 << (src2 & 0x1f).
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    // Mask to 5 bits to match Java shift semantics for int.
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10945 
10946 // Shift Right Logical Register
// Integer unsigned (logical) shift right by register:
// dst = src1 >>> (src2 & 0x1f) (LSRVW).
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10961 
10962 // Shift Right Logical Immediate
// Integer unsigned (logical) shift right by immediate:
// dst = src1 >>> (src2 & 0x1f).
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10977 
10978 // Shift Right Arithmetic Register
// Integer arithmetic shift right by register:
// dst = src1 >> (src2 & 0x1f) (ASRVW).
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10993 
10994 // Shift Right Arithmetic Immediate
// Integer arithmetic shift right by immediate:
// dst = src1 >> (src2 & 0x1f).
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11009 
11010 // Combined Int Mask and Right Shift (using UBFM)
11011 // TODO
11012 
11013 // Long Shifts
11014 
11015 // Shift Left Register
// Long shift left by register: dst = src1 << (src2 & 0x3f) (LSLV;
// the hardware masks the shift amount to the register width).
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11030 
11031 // Shift Left Immediate
// Long shift left by immediate: dst = src1 << (src2 & 0x3f).
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    // Mask to 6 bits to match Java shift semantics for long.
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11046 
11047 // Shift Right Logical Register
// Long unsigned (logical) shift right by register:
// dst = src1 >>> (src2 & 0x3f) (LSRV).
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11062 
11063 // Shift Right Logical Immediate
// Long unsigned (logical) shift right by immediate:
// dst = src1 >>> (src2 & 0x3f).
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11078 
11079 // A special-case pattern for card table stores.
// Logical right shift of a pointer reinterpreted as a long (CastP2X):
// a special-case pattern used by card table stores.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11094 
11095 // Shift Right Arithmetic Register
// Long arithmetic shift right by register:
// dst = src1 >> (src2 & 0x3f) (ASRV).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11110 
11111 // Shift Right Arithmetic Immediate
// Long arithmetic shift right by immediate:
// dst = src1 >> (src2 & 0x3f).
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11126 
11127 // BEGIN This section of the file is automatically generated. Do not edit --------------
11128 
11129 
11130 // This pattern is automatically generated from aarch64_ad.m4
11131 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11132 instruct regL_not_reg(iRegLNoSp dst,
11133                          iRegL src1, immL_M1 m1,
11134                          rFlagsReg cr) %{
11135   match(Set dst (XorL src1 m1));
11136   ins_cost(INSN_COST);
11137   format %{ "eon  $dst, $src1, zr" %}
11138 
11139   ins_encode %{
11140     __ eon(as_Register($dst$$reg),
11141               as_Register($src1$$reg),
11142               zr,
11143               Assembler::LSL, 0);
11144   %}
11145 
11146   ins_pipe(ialu_reg);
11147 %}
11148 
11149 // This pattern is automatically generated from aarch64_ad.m4
11150 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11151 instruct regI_not_reg(iRegINoSp dst,
11152                          iRegIorL2I src1, immI_M1 m1,
11153                          rFlagsReg cr) %{
11154   match(Set dst (XorI src1 m1));
11155   ins_cost(INSN_COST);
11156   format %{ "eonw  $dst, $src1, zr" %}
11157 
11158   ins_encode %{
11159     __ eonw(as_Register($dst$$reg),
11160               as_Register($src1$$reg),
11161               zr,
11162               Assembler::LSL, 0);
11163   %}
11164 
11165   ins_pipe(ialu_reg);
11166 %}
11167 
11168 // This pattern is automatically generated from aarch64_ad.m4
11169 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  // dst = src1 & ~src2 (32-bit): XOR-with-minus-one plus AND fused into one BICW.
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11186 
11187 // This pattern is automatically generated from aarch64_ad.m4
11188 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  // dst = src1 & ~src2 (64-bit): XOR-with-minus-one plus AND fused into one BIC.
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11205 
11206 // This pattern is automatically generated from aarch64_ad.m4
11207 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  // dst = src1 | ~src2 (32-bit): XOR-with-minus-one plus OR fused into one ORNW.
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11224 
11225 // This pattern is automatically generated from aarch64_ad.m4
11226 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  // dst = src1 | ~src2 (64-bit): XOR-with-minus-one plus OR fused into one ORN.
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11243 
11244 // This pattern is automatically generated from aarch64_ad.m4
11245 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  // dst = -1 ^ (src2 ^ src1) == ~(src1 ^ src2) (32-bit), fused into one EONW.
  // Note the canonicalized match shape: the -1 is the outer XOR operand.
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11262 
11263 // This pattern is automatically generated from aarch64_ad.m4
11264 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  // dst = -1 ^ (src2 ^ src1) == ~(src1 ^ src2) (64-bit), fused into one EON.
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11281 
11282 // This pattern is automatically generated from aarch64_ad.m4
11283 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  // dst = src1 & ~(src2 >>> src3) (32-bit): BICW with an LSR-shifted second operand.
  // Shift amount masked to 0x1f — the valid range for 32-bit shifted-register operands.
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11301 
11302 // This pattern is automatically generated from aarch64_ad.m4
11303 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  // dst = src1 & ~(src2 >>> src3) (64-bit): BIC with an LSR-shifted second operand.
  // Shift amount masked to 0x3f — the valid range for 64-bit shifted-register operands.
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11321 
11322 // This pattern is automatically generated from aarch64_ad.m4
11323 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  // dst = src1 & ~(src2 >> src3) (32-bit, arithmetic shift): BICW with ASR-shifted operand.
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11341 
11342 // This pattern is automatically generated from aarch64_ad.m4
11343 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  // dst = src1 & ~(src2 >> src3) (64-bit, arithmetic shift): BIC with ASR-shifted operand.
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11361 
11362 // This pattern is automatically generated from aarch64_ad.m4
11363 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  // dst = src1 & ~(src2 << src3) (32-bit): BICW with an LSL-shifted second operand.
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11381 
11382 // This pattern is automatically generated from aarch64_ad.m4
11383 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  // dst = src1 & ~(src2 << src3) (64-bit): BIC with an LSL-shifted second operand.
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11401 
11402 // This pattern is automatically generated from aarch64_ad.m4
11403 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  // dst = -1 ^ ((src2 >>> src3) ^ src1) == ~(src1 ^ (src2 >>> src3)) (32-bit):
  // EONW with an LSR-shifted second operand.
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11421 
11422 // This pattern is automatically generated from aarch64_ad.m4
11423 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  // dst = ~(src1 ^ (src2 >>> src3)) (64-bit): EON with an LSR-shifted second operand.
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11441 
11442 // This pattern is automatically generated from aarch64_ad.m4
11443 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  // dst = ~(src1 ^ (src2 >> src3)) (32-bit): EONW with an ASR-shifted second operand.
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11461 
11462 // This pattern is automatically generated from aarch64_ad.m4
11463 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  // dst = ~(src1 ^ (src2 >> src3)) (64-bit): EON with an ASR-shifted second operand.
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11481 
11482 // This pattern is automatically generated from aarch64_ad.m4
11483 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  // dst = ~(src1 ^ (src2 << src3)) (32-bit): EONW with an LSL-shifted second operand.
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11501 
11502 // This pattern is automatically generated from aarch64_ad.m4
11503 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  // dst = ~(src1 ^ (src2 << src3)) (64-bit): EON with an LSL-shifted second operand.
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11521 
11522 // This pattern is automatically generated from aarch64_ad.m4
11523 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  // dst = src1 | ~(src2 >>> src3) (32-bit): ORNW with an LSR-shifted second operand.
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11541 
11542 // This pattern is automatically generated from aarch64_ad.m4
11543 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  // dst = src1 | ~(src2 >>> src3) (64-bit): ORN with an LSR-shifted second operand.
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11561 
11562 // This pattern is automatically generated from aarch64_ad.m4
11563 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  // dst = src1 | ~(src2 >> src3) (32-bit): ORNW with an ASR-shifted second operand.
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11581 
11582 // This pattern is automatically generated from aarch64_ad.m4
11583 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  // dst = src1 | ~(src2 >> src3) (64-bit): ORN with an ASR-shifted second operand.
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11601 
11602 // This pattern is automatically generated from aarch64_ad.m4
11603 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  // dst = src1 | ~(src2 << src3) (32-bit): ORNW with an LSL-shifted second operand.
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11621 
11622 // This pattern is automatically generated from aarch64_ad.m4
11623 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  // dst = src1 | ~(src2 << src3) (64-bit): ORN with an LSL-shifted second operand.
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11641 
11642 // This pattern is automatically generated from aarch64_ad.m4
11643 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 & (src2 >>> src3) (32-bit): shift folded into ANDW's shifted-register
  // operand, saving a separate shift instruction.
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11662 
11663 // This pattern is automatically generated from aarch64_ad.m4
11664 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 & (src2 >>> src3) (64-bit): shift folded into the AND's shifted-register
  // operand (andr emits the 64-bit AND instruction).
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11683 
11684 // This pattern is automatically generated from aarch64_ad.m4
11685 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 & (src2 >> src3) (32-bit): ANDW with an ASR-shifted second operand.
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11704 
11705 // This pattern is automatically generated from aarch64_ad.m4
11706 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 & (src2 >> src3) (64-bit): AND with an ASR-shifted second operand.
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11725 
11726 // This pattern is automatically generated from aarch64_ad.m4
11727 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 & (src2 << src3) (32-bit): ANDW with an LSL-shifted second operand.
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11746 
11747 // This pattern is automatically generated from aarch64_ad.m4
11748 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 & (src2 << src3) (64-bit): AND with an LSL-shifted second operand.
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11767 
11768 // This pattern is automatically generated from aarch64_ad.m4
11769 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 ^ (src2 >>> src3) (32-bit): EORW with an LSR-shifted second operand.
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11788 
11789 // This pattern is automatically generated from aarch64_ad.m4
11790 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 ^ (src2 >>> src3) (64-bit): EOR with an LSR-shifted second operand.
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11809 
11810 // This pattern is automatically generated from aarch64_ad.m4
11811 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 ^ (src2 >> src3) (32-bit): EORW with an ASR-shifted second operand.
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11830 
11831 // This pattern is automatically generated from aarch64_ad.m4
11832 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 ^ (src2 >> src3) (64-bit): EOR with an ASR-shifted second operand.
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11851 
11852 // This pattern is automatically generated from aarch64_ad.m4
11853 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 ^ (src2 << src3) (32-bit): EORW with an LSL-shifted second operand.
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11872 
11873 // This pattern is automatically generated from aarch64_ad.m4
11874 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 ^ (src2 << src3) (64-bit): EOR with an LSL-shifted second operand.
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11893 
11894 // This pattern is automatically generated from aarch64_ad.m4
11895 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 | (src2 >>> src3) (32-bit): ORRW with an LSR-shifted second operand.
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11914 
11915 // This pattern is automatically generated from aarch64_ad.m4
11916 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 | (src2 >>> src3) (64-bit): ORR with an LSR-shifted second operand.
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11935 
11936 // This pattern is automatically generated from aarch64_ad.m4
11937 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 | (src2 >> src3) (32-bit): ORRW with an ASR-shifted second operand.
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11956 
11957 // This pattern is automatically generated from aarch64_ad.m4
11958 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 | (src2 >> src3) (64-bit): ORR with an ASR-shifted second operand.
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11977 
11978 // This pattern is automatically generated from aarch64_ad.m4
11979 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 | (src2 << src3) (32-bit): ORRW with an LSL-shifted second operand.
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11998 
11999 // This pattern is automatically generated from aarch64_ad.m4
12000 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 | (src2 << src3) (64-bit): ORR with an LSL-shifted second operand.
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12019 
12020 // This pattern is automatically generated from aarch64_ad.m4
12021 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 + (src2 >>> src3) (32-bit): shift folded into ADDW's shifted-register
  // operand.
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12040 
12041 // This pattern is automatically generated from aarch64_ad.m4
12042 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12043 instruct AddL_reg_URShift_reg(iRegLNoSp dst,
12044                          iRegL src1, iRegL src2,
12045                          immI src3, rFlagsReg cr) %{
12046   match(Set dst (AddL src1 (URShiftL src2 src3)));
12047 
12048   ins_cost(1.9 * INSN_COST);
12049   format %{ "add  $dst, $src1, $src2, LSR $src3" %}
12050 
12051   ins_encode %{
12052     __ add(as_Register($dst$$reg),
12053               as_Register($src1$$reg),
12054               as_Register($src2$$reg),
12055               Assembler::LSR,
12056               $src3$$constant & 0x3f);
12057   %}
12058 
12059   ins_pipe(ialu_reg_reg_shift);
12060 %}
12061 
12062 // This pattern is automatically generated from aarch64_ad.m4
12063 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12064 instruct AddI_reg_RShift_reg(iRegINoSp dst,
12065                          iRegIorL2I src1, iRegIorL2I src2,
12066                          immI src3, rFlagsReg cr) %{
12067   match(Set dst (AddI src1 (RShiftI src2 src3)));
12068 
12069   ins_cost(1.9 * INSN_COST);
12070   format %{ "addw  $dst, $src1, $src2, ASR $src3" %}
12071 
12072   ins_encode %{
12073     __ addw(as_Register($dst$$reg),
12074               as_Register($src1$$reg),
12075               as_Register($src2$$reg),
12076               Assembler::ASR,
12077               $src3$$constant & 0x1f);
12078   %}
12079 
12080   ins_pipe(ialu_reg_reg_shift);
12081 %}
12082 
12083 // This pattern is automatically generated from aarch64_ad.m4
12084 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12085 instruct AddL_reg_RShift_reg(iRegLNoSp dst,
12086                          iRegL src1, iRegL src2,
12087                          immI src3, rFlagsReg cr) %{
12088   match(Set dst (AddL src1 (RShiftL src2 src3)));
12089 
12090   ins_cost(1.9 * INSN_COST);
12091   format %{ "add  $dst, $src1, $src2, ASR $src3" %}
12092 
12093   ins_encode %{
12094     __ add(as_Register($dst$$reg),
12095               as_Register($src1$$reg),
12096               as_Register($src2$$reg),
12097               Assembler::ASR,
12098               $src3$$constant & 0x3f);
12099   %}
12100 
12101   ins_pipe(ialu_reg_reg_shift);
12102 %}
12103 
12104 // This pattern is automatically generated from aarch64_ad.m4
12105 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12106 instruct AddI_reg_LShift_reg(iRegINoSp dst,
12107                          iRegIorL2I src1, iRegIorL2I src2,
12108                          immI src3, rFlagsReg cr) %{
12109   match(Set dst (AddI src1 (LShiftI src2 src3)));
12110 
12111   ins_cost(1.9 * INSN_COST);
12112   format %{ "addw  $dst, $src1, $src2, LSL $src3" %}
12113 
12114   ins_encode %{
12115     __ addw(as_Register($dst$$reg),
12116               as_Register($src1$$reg),
12117               as_Register($src2$$reg),
12118               Assembler::LSL,
12119               $src3$$constant & 0x1f);
12120   %}
12121 
12122   ins_pipe(ialu_reg_reg_shift);
12123 %}
12124 
12125 // This pattern is automatically generated from aarch64_ad.m4
12126 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12127 instruct AddL_reg_LShift_reg(iRegLNoSp dst,
12128                          iRegL src1, iRegL src2,
12129                          immI src3, rFlagsReg cr) %{
12130   match(Set dst (AddL src1 (LShiftL src2 src3)));
12131 
12132   ins_cost(1.9 * INSN_COST);
12133   format %{ "add  $dst, $src1, $src2, LSL $src3" %}
12134 
12135   ins_encode %{
12136     __ add(as_Register($dst$$reg),
12137               as_Register($src1$$reg),
12138               as_Register($src2$$reg),
12139               Assembler::LSL,
12140               $src3$$constant & 0x3f);
12141   %}
12142 
12143   ins_pipe(ialu_reg_reg_shift);
12144 %}
12145 
12146 // This pattern is automatically generated from aarch64_ad.m4
12147 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12148 instruct SubI_reg_URShift_reg(iRegINoSp dst,
12149                          iRegIorL2I src1, iRegIorL2I src2,
12150                          immI src3, rFlagsReg cr) %{
12151   match(Set dst (SubI src1 (URShiftI src2 src3)));
12152 
12153   ins_cost(1.9 * INSN_COST);
12154   format %{ "subw  $dst, $src1, $src2, LSR $src3" %}
12155 
12156   ins_encode %{
12157     __ subw(as_Register($dst$$reg),
12158               as_Register($src1$$reg),
12159               as_Register($src2$$reg),
12160               Assembler::LSR,
12161               $src3$$constant & 0x1f);
12162   %}
12163 
12164   ins_pipe(ialu_reg_reg_shift);
12165 %}
12166 
12167 // This pattern is automatically generated from aarch64_ad.m4
12168 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12169 instruct SubL_reg_URShift_reg(iRegLNoSp dst,
12170                          iRegL src1, iRegL src2,
12171                          immI src3, rFlagsReg cr) %{
12172   match(Set dst (SubL src1 (URShiftL src2 src3)));
12173 
12174   ins_cost(1.9 * INSN_COST);
12175   format %{ "sub  $dst, $src1, $src2, LSR $src3" %}
12176 
12177   ins_encode %{
12178     __ sub(as_Register($dst$$reg),
12179               as_Register($src1$$reg),
12180               as_Register($src2$$reg),
12181               Assembler::LSR,
12182               $src3$$constant & 0x3f);
12183   %}
12184 
12185   ins_pipe(ialu_reg_reg_shift);
12186 %}
12187 
12188 // This pattern is automatically generated from aarch64_ad.m4
12189 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12190 instruct SubI_reg_RShift_reg(iRegINoSp dst,
12191                          iRegIorL2I src1, iRegIorL2I src2,
12192                          immI src3, rFlagsReg cr) %{
12193   match(Set dst (SubI src1 (RShiftI src2 src3)));
12194 
12195   ins_cost(1.9 * INSN_COST);
12196   format %{ "subw  $dst, $src1, $src2, ASR $src3" %}
12197 
12198   ins_encode %{
12199     __ subw(as_Register($dst$$reg),
12200               as_Register($src1$$reg),
12201               as_Register($src2$$reg),
12202               Assembler::ASR,
12203               $src3$$constant & 0x1f);
12204   %}
12205 
12206   ins_pipe(ialu_reg_reg_shift);
12207 %}
12208 
12209 // This pattern is automatically generated from aarch64_ad.m4
12210 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12211 instruct SubL_reg_RShift_reg(iRegLNoSp dst,
12212                          iRegL src1, iRegL src2,
12213                          immI src3, rFlagsReg cr) %{
12214   match(Set dst (SubL src1 (RShiftL src2 src3)));
12215 
12216   ins_cost(1.9 * INSN_COST);
12217   format %{ "sub  $dst, $src1, $src2, ASR $src3" %}
12218 
12219   ins_encode %{
12220     __ sub(as_Register($dst$$reg),
12221               as_Register($src1$$reg),
12222               as_Register($src2$$reg),
12223               Assembler::ASR,
12224               $src3$$constant & 0x3f);
12225   %}
12226 
12227   ins_pipe(ialu_reg_reg_shift);
12228 %}
12229 
12230 // This pattern is automatically generated from aarch64_ad.m4
12231 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12232 instruct SubI_reg_LShift_reg(iRegINoSp dst,
12233                          iRegIorL2I src1, iRegIorL2I src2,
12234                          immI src3, rFlagsReg cr) %{
12235   match(Set dst (SubI src1 (LShiftI src2 src3)));
12236 
12237   ins_cost(1.9 * INSN_COST);
12238   format %{ "subw  $dst, $src1, $src2, LSL $src3" %}
12239 
12240   ins_encode %{
12241     __ subw(as_Register($dst$$reg),
12242               as_Register($src1$$reg),
12243               as_Register($src2$$reg),
12244               Assembler::LSL,
12245               $src3$$constant & 0x1f);
12246   %}
12247 
12248   ins_pipe(ialu_reg_reg_shift);
12249 %}
12250 
12251 // This pattern is automatically generated from aarch64_ad.m4
12252 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12253 instruct SubL_reg_LShift_reg(iRegLNoSp dst,
12254                          iRegL src1, iRegL src2,
12255                          immI src3, rFlagsReg cr) %{
12256   match(Set dst (SubL src1 (LShiftL src2 src3)));
12257 
12258   ins_cost(1.9 * INSN_COST);
12259   format %{ "sub  $dst, $src1, $src2, LSL $src3" %}
12260 
12261   ins_encode %{
12262     __ sub(as_Register($dst$$reg),
12263               as_Register($src1$$reg),
12264               as_Register($src2$$reg),
12265               Assembler::LSL,
12266               $src3$$constant & 0x3f);
12267   %}
12268 
12269   ins_pipe(ialu_reg_reg_shift);
12270 %}
12271 
12272  
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
//
// NOTE(review): (src << lshift) >> rshift is rewritten as a single
// SBFM (signed shifts) / UBFM (unsigned shifts).  The encoding below
// follows the bitfield-move definition: immr = (rshift - lshift) mod
// width, imms = (width-1) - lshift, where width is 64 for the L forms
// and 32 for the w forms.  Permanent changes belong in aarch64_ad.m4.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12364 
// Bitfield extract with shift & mask

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
//
// NOTE(review): (src >>> rshift) & mask becomes a single UBFX when mask+1
// is a power of two (guaranteed by immI_bitmask / immL_bitmask).  The
// predicate checks rshift + field width fits within the register width,
// which is what the instruction's imms encoding requires.  Permanent
// changes belong in aarch64_ad.m4.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));
  // Make sure we are not going to exceed what ubfx can do.
  predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12406 
12407 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
//
// NOTE(review): this section fuses mask/shift/convert idioms into single
// UBFX / UBFIZ instructions.  The two comment typo fixes below
// ("between and AndI" -> "between an AndI", and likewise for AndL) should
// be mirrored in aarch64_ad.m4, since this section is regenerated from it.

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
instruct ubfizwIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (LShiftI (AndI src mask) lshift)));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
instruct ubfizLConvL2I(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
%{
  match(Set dst (ConvL2I (LShiftL (AndL src mask) lshift)));
  predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// If there is a convert I to L block between an AndI and a LShiftL, we can also match ubfiz
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// If there is a convert L to I block between an AndL and a LShiftI, we can also match ubfiz
instruct ubfizLConvL2Ix(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
%{
  match(Set dst (LShiftI (ConvL2I (AndL src mask)) lshift));
  predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 31)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Can skip int2long conversions after AND with small bitmask
instruct ubfizIConvI2LAndI(iRegLNoSp dst, iRegI src, immI_bitmask msk)
%{
  match(Set dst (ConvI2L (AndI src msk)));
  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, 0, exact_log2($msk + 1) " %}
  ins_encode %{
    __ ubfiz(as_Register($dst$$reg), as_Register($src$$reg), 0, exact_log2($msk$$constant + 1));
  %}
  ins_pipe(ialu_reg_shift);
%}
12577 
12578 
// Rotations
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
//
// NOTE(review): (a << lshift) | (b >>> rshift) (and the Add variant,
// equivalent here because the shifted fields cannot overlap) maps to a
// single EXTR.  The predicate requires lshift + rshift == 0 (mod register
// width), i.e. the two fields exactly tile the register — the rotate
// composition EXTR implements.  Permanent changes belong in aarch64_ad.m4.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12650 
12651 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// rol expander
//
// NOTE(review): AArch64 has no rotate-left-by-register instruction, so
// rol(x, s) is emitted as rorv(x, -s): the shift count is negated into
// rscratch1 with subw from zr, then rorv/rorvw rotates right by it.
// The *_Var_C_64 / *_Var_C_32 / *_Var_C0 rules below recognize the
// (x << s) | (x >>> (c - s)) rotate idiom for c == register width and
// c == 0 (shift counts are taken mod the width, so both constants
// produce the same rotate) and expand into these expanders.
// Permanent changes belong in aarch64_ad.m4.
instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// rol expander
instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// ror expander
instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// ror expander
instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12809 
12810 
// Add/subtract (extended)

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
//
// NOTE(review): these patterns fold a sign/zero extension of the second
// operand into the add/sub itself, via the AArch64 "extended register"
// operand form (ext::sxtw/sxth/sxtb/uxtb).  The sxth/sxtb/uxtb rules
// recognize the (x << k) >> k extension idiom through the immI_16/immI_24
// shift-count operands.  Permanent changes belong in aarch64_ad.m4.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12887 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated from aarch64_ad.m4 -- edit the .m4 source.
// Long variants of the shift-pair extensions: (x << 48) >> 48 etc. are
// 16/32/8-bit sign or zero extensions folded into ADD (extended register).
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (x << 32) >> 32: 32-bit sign extension folded in as sxtw.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (x << 56) >> 56: 8-bit sign extension folded in as sxtb.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (x << 56) >>> 56: 8-bit zero extension folded in as uxtb.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12947 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated from aarch64_ad.m4 -- edit the .m4 source.
// Masking with 0xff / 0xffff / 0xffffffff is a zero extension; fold it
// into ADD (extended register) as uxtb/uxth/uxtw instead of a separate
// AND.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13022 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated from aarch64_ad.m4 -- edit the .m4 source.
// SUB counterparts of the masked-extension rules above: the AND mask is
// a uxtb/uxth/uxtw zero extension folded into SUB (extended register).
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13097 
13098 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated from aarch64_ad.m4 -- edit the .m4 source.
// Extended-register add/sub with an additional left shift: the
// sign-extension shift pair plus an outer << lshift2 map onto the
// "extend + shift amount" form of ADD/SUB (extended register); immIExt
// limits lshift2 to the 0..4 range the encoding allows.
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13188 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated from aarch64_ad.m4 -- edit the .m4 source.
// 32-bit versions of the shifted-extend add/sub rules above (addw/subw).
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13248 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated from aarch64_ad.m4 -- edit the .m4 source.
// long +/- ((long)int << k): fold both the i2l conversion (sxtw) and the
// small left shift into one ADD/SUB (extended register) instruction.
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13278 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated from aarch64_ad.m4 -- edit the .m4 source.
// long +/- ((src2 & mask) << k): the AND mask is a zero extension
// (uxtb/uxth/uxtw) and the shift fits the extended-register encoding,
// so the whole expression folds into one ADD/SUB.
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13368 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// NOTE(review): generated from aarch64_ad.m4 -- edit the .m4 source.
// 32-bit versions of the masked, shifted extended-register add/sub.
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13428 
13429 
13430 
13431 // END This section of the file is automatically generated. Do not edit --------------
13432 
13433 
13434 // ============================================================================
13435 // Floating Point Arithmetic Instructions
13436 
// Single-precision floating-point add: dst = src1 + src2 (FADDS).
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision floating-point add: dst = src1 + src2 (FADDD).
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13466 
// Single-precision floating-point subtract: dst = src1 - src2 (FSUBS).
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision floating-point subtract: dst = src1 - src2 (FSUBD).
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13496 
// Single-precision floating-point multiply: dst = src1 * src2 (FMULS).
// Slightly higher cost than add/sub (INSN_COST * 6 vs * 5).
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision floating-point multiply: dst = src1 * src2 (FMULD).
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13526 
// src1 * src2 + src3
// Fused multiply-add (FMADD, single rounding); only selected when the
// FmaF/FmaD ideal nodes are generated, i.e. when UseFMA is on.
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3
// Double-precision fused multiply-add (FMADD).
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13560 
// -src1 * src2 + src3
// FMSUB: dst = src3 - src1 * src2.  Two match rules cover the negation
// appearing on either multiplicand.
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
// Double-precision FMSUB.
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13596 
// -src1 * src2 - src3
// FNMADD: dst = -(src1 * src2) - src3.  Both operand-negation shapes of
// the ideal graph map to the same instruction.
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
// Double-precision FNMADD.
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13632 
13633 // src1 * src2 - src3
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  // Fused multiply-subtract, single precision: dst = src1 * src2 - src3
  // (the negated accumulator is expressed as (NegF src3) in the ideal graph).
  // NOTE(review): the immF0 zero operand is referenced by neither match rule
  // nor the encoding -- confirm it is intentional.
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13649 
13650 // src1 * src2 - src3
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  // Fused multiply-subtract, double precision: dst = src1 * src2 - src3.
  // NOTE(review): the immD0 zero operand is referenced by neither match rule
  // nor the encoding -- confirm it is intentional.
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
  // n.b. insn name should be fnmsubd (the assembler entry point lacks the
  // 'd' suffix; it still emits the double-precision FNMSUB).
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13667 
13668 
13669 // Math.max(FF)F
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  // Math.max(float, float) via the FMAXS instruction.
  match(Set dst (MaxF src1 src2));

  format %{ "fmaxs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
13682 
13683 // Math.min(FF)F
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  // Math.min(float, float) via the FMINS instruction.
  match(Set dst (MinF src1 src2));

  format %{ "fmins   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
13696 
13697 // Math.max(DD)D
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  // Math.max(double, double) via the FMAXD instruction.
  match(Set dst (MaxD src1 src2));

  format %{ "fmaxd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13710 
13711 // Math.min(DD)D
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  // Math.min(double, double) via the FMIND instruction.
  match(Set dst (MinD src1 src2));

  format %{ "fmind   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13724 
13725 
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  // Single-precision floating-point divide; high cost reflects the
  // multi-cycle FDIVS latency.
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}
13740 
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  // Double-precision floating-point divide; costed higher than the
  // single-precision form (FDIVD is slower than FDIVS).
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
13755 
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  // Single-precision floating-point negate via FNEGS.
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Format fixed to name the actual emitted instruction (fnegs, not fneg),
  // matching the negD rule's "fnegd" convention.
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
13769 
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  // Double-precision floating-point negate via FNEGD.
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13783 
instruct absI_reg(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  // Integer absolute value: compare against zero, then conditionally
  // negate when src < 0. Clobbers the flags (KILL cr).
  match(Set dst (AbsI src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $src, zr\n\t"
            "cnegw $dst, $src, Assembler::LT\t# int abs"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cnegw(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
13800 
instruct absL_reg(iRegLNoSp dst, iRegL src, rFlagsReg cr)
%{
  // Long absolute value: compare against zero, then conditionally
  // negate when src < 0. Clobbers the flags (KILL cr).
  match(Set dst (AbsL src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmp  $src, zr\n\t"
            "cneg $dst, $src, Assembler::LT\t# long abs"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cneg(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
13817 
instruct absF_reg(vRegF dst, vRegF src) %{
  // Single-precision floating-point absolute value via FABSS.
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
13830 
instruct absD_reg(vRegD dst, vRegD src) %{
  // Double-precision floating-point absolute value via FABSD.
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13843 
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  // Double-precision square root via FSQRTD.
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  // Fixed: the double-precision sqrt belongs on the double divide/sqrt
  // pipe (fp_div_d); it previously used fp_div_s, swapped with sqrtF_reg.
  ins_pipe(fp_div_d);
%}
13856 
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  // Single-precision square root via FSQRTS.
  match(Set dst (SqrtF src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  // Fixed: the single-precision sqrt belongs on the single divide/sqrt
  // pipe (fp_div_s); it previously used fp_div_d, swapped with sqrtD_reg.
  ins_pipe(fp_div_s);
%}
13869 
13870 // Math.rint, floor, ceil
instruct roundD_reg(vRegD dst, vRegD src, immI rmode) %{
  // Double rounding with a compile-time rounding-mode selector:
  // rint -> FRINTN (ties to even), floor -> FRINTM (toward -inf),
  // ceil -> FRINTP (toward +inf).
  match(Set dst (RoundDoubleMode src rmode));
  format %{ "frint  $dst, $src, $rmode" %}
  ins_encode %{
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        __ frintnd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_floor:
        __ frintmd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_ceil:
        __ frintpd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      default:
        // An unrecognized mode would previously emit nothing silently.
        ShouldNotReachHere();
    }
  %}
  ins_pipe(fp_uop_d);
%}
13892 
13893 // ============================================================================
13894 // Logical Instructions
13895 
13896 // Integer Logical Instructions
13897 
13898 // And Instructions
13899 
13900 
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  // Int bitwise AND, register-register (ANDW, non-flag-setting).
  // NOTE(review): cr appears in the operand list but there is no
  // effect(KILL cr) and andw does not set flags -- confirm cr is needed.
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13915 
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  // Int bitwise AND with a logical immediate (ANDW, non-flag-setting).
  match(Set dst (AndI src1 src2));

  // Format fixed: the encoding emits the non-flag-setting andw, not andsw.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13930 
13931 // Or Instructions
13932 
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  // Int bitwise OR, register-register (ORRW).
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13947 
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  // Int bitwise OR with a logical immediate (ORRW).
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13962 
13963 // Xor Instructions
13964 
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  // Int bitwise XOR, register-register (EORW).
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13979 
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  // Int bitwise XOR with a logical immediate (EORW).
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13994 
13995 // Long Logical Instructions
13996 // TODO
13997 
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  // Long bitwise AND, register-register (AND, 64-bit).
  match(Set dst (AndL src1 src2));

  // Format comment fixed: this is the long rule, not int.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14012 
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  // Long bitwise AND with a logical immediate (AND, 64-bit).
  match(Set dst (AndL src1 src2));

  // Format comment fixed: this is the long rule, not int.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14027 
14028 // Or Instructions
14029 
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  // Long bitwise OR, register-register (ORR, 64-bit).
  match(Set dst (OrL src1 src2));

  // Format comment fixed: this is the long rule, not int.
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14044 
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  // Long bitwise OR with a logical immediate (ORR, 64-bit).
  match(Set dst (OrL src1 src2));

  // Format comment fixed: this is the long rule, not int.
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14059 
14060 // Xor Instructions
14061 
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  // Long bitwise XOR, register-register (EOR, 64-bit).
  match(Set dst (XorL src1 src2));

  // Format comment fixed: this is the long rule, not int.
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14076 
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  // Long bitwise XOR with a logical immediate (EOR, 64-bit).
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  // Format comment fixed: this is the long rule, not int.
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14091 
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  // int -> long sign extension; sbfm with immr=0, imms=31 is the
  // canonical encoding of sxtw.
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
14103 
14104 // this pattern occurs in bigmath arithmetic
// this pattern occurs in bigmath arithmetic
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  // Unsigned int -> long: (ConvI2L src) & 0xFFFFFFFF folds to a single
  // zero-extension; ubfm with immr=0, imms=31 is uxtw.
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14117 
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  // long -> int truncation: a 32-bit register move keeps the low word
  // and zeroes the upper half.
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
14130 
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  // int -> boolean: dst = (src != 0) ? 1 : 0. Clobbers flags.
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
14148 
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  // pointer -> boolean: dst = (src != NULL) ? 1 : 0. Uses the 64-bit
  // compare (pointers are 64-bit). Clobbers flags.
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
14166 
instruct convD2F_reg(vRegF dst, vRegD src) %{
  // double -> float narrowing conversion (FCVT Sd, Dn).
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}
14179 
instruct convF2D_reg(vRegD dst, vRegF src) %{
  // float -> double widening conversion (FCVT Dd, Sn).
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}
14192 
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  // float -> int: signed convert with round-toward-zero (FCVTZS Wd, Sn).
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}
14205 
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  // float -> long: signed convert with round-toward-zero (FCVTZS Xd, Sn).
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}
14218 
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  // int -> float: signed 32-bit integer convert (SCVTF Sd, Wn).
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}
14231 
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  // long -> float: signed 64-bit integer convert (SCVTF Sd, Xn).
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}
14244 
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  // double -> int: signed convert with round-toward-zero (FCVTZS Wd, Dn).
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}
14257 
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  // double -> long: signed convert with round-toward-zero (FCVTZS Xd, Dn).
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}
14270 
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  // int -> double: signed 32-bit integer convert (SCVTF Dd, Wn).
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}
14283 
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  // long -> double: signed 64-bit integer convert (SCVTF Dd, Xn).
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
14296 
14297 // stack <-> reg and reg <-> reg shuffles with no conversion
14298 
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  // Raw bit move of a float stack slot into an int register: a 32-bit
  // integer load from the spill slot, no value conversion.
  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}
14316 
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  // Raw bit move of an int stack slot into a float register: a 32-bit
  // FP load from the spill slot, no value conversion.
  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14334 
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  // Raw bit move of a double stack slot into a long register: a 64-bit
  // integer load from the spill slot, no value conversion.
  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}
14352 
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  // Raw bit move of a long stack slot into a double register: a 64-bit
  // FP load from the spill slot, no value conversion.
  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14370 
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  // Raw bit move of a float register to an int stack slot: a 32-bit
  // FP store to the spill slot, no value conversion.
  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14388 
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  // Raw bit move of an int register to a float stack slot: a 32-bit
  // integer store to the spill slot, no value conversion.
  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
14406 
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  // Raw bit move of a double register to a long stack slot: a 64-bit
  // FP store to the spill slot, no value conversion.
  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Format fixed: operands were reversed ("strd $dst, $src"); the store
  // writes $src into the $dst slot, matching the sibling reg->stack rules.
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14424 
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  // Raw bit move of a long register to a double stack slot: a 64-bit
  // integer store to the spill slot, no value conversion.
  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
14442 
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  // Raw bit move float reg -> int reg via FMOV (no memory round trip,
  // no value conversion).
  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}
14460 
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  // Raw bit move int reg -> float reg via FMOV (no memory round trip,
  // no value conversion).
  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}
14478 
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  // Raw bit move double reg -> long reg via FMOV (no memory round trip,
  // no value conversion).
  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}
14496 
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  // Raw bit move long reg -> double reg via FMOV (no memory round trip,
  // no value conversion).
  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
14514 
14515 // ============================================================================
14516 // clearing of an array
14517 
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  // Zero an array with a runtime word count. Fixed registers (r10 base,
  // r11 count) feed the zero_words stub; both are destroyed, as are flags.
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base, KILL cr);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, $cnt$$Register);
  %}

  ins_pipe(pipe_class_memory);
%}
14532 
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  // Zero an array with a compile-time word count, only when the count is
  // below the block-zeroing threshold (otherwise the reg-reg rule applies).
  predicate((uint64_t)n->in(2)->get_long()
            < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
  %}

  ins_pipe(pipe_class_memory);
%}
14549 
14550 // ============================================================================
14551 // Overflow Math Instructions
14552 
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  // Int add overflow check: CMNW (adds discarding result) sets V on
  // signed overflow of op1 + op2.
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
14565 
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  // Int add overflow check with an add/sub-encodable immediate.
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
14578 
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  // Long add overflow check: 64-bit CMN sets V on signed overflow.
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
14591 
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  // Long add overflow check with an add/sub-encodable immediate.
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
14604 
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  // Int subtract overflow check: CMPW sets V on signed overflow of
  // op1 - op2.
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
14617 
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  // Int subtract overflow check with an add/sub-encodable immediate.
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
14630 
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  // Long subtract overflow check: 64-bit CMP sets V on signed overflow.
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
14643 
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  // Long subtract overflow check with an immediate; subs into zr is the
  // expansion of cmp with an immediate.
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ subs(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
14656 
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  // Int negation overflow check: 0 - op1 overflows only for
  // Integer.MIN_VALUE; cmpw against zr sets V in that case.
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
14669 
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  // Long negation overflow check: 0 - op1 overflows only for
  // Long.MIN_VALUE.
  // NOTE(review): zero is declared immI0 although OverflowSubL's
  // minuend is a long constant -- verify this rule can actually match
  // (compare with immL0 as used by other 64-bit zero operands).
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
14682 
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  // Int multiply overflow check. Widen to a 64-bit product, compare it
  // against its own low 32 bits sign-extended (equal iff no overflow),
  // then synthesize the V flag so downstream code can test VS/VC.
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
14703 
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  // Fused int-multiply-overflow test and branch: avoids synthesizing the
  // V flag by branching directly on the widened-product comparison.
  // Only applies when the If tests overflow/no_overflow.
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    // Map the requested overflow condition (VS/VC) onto NE/EQ of the
    // comparison above.
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
14725 
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  // Long multiply overflow check. Compute low (mul) and high (smulh)
  // halves of the 128-bit product; no overflow iff the high half equals
  // the sign extension of the low half. Then synthesize the V flag.
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
14748 
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  // Fused long-multiply-overflow test and branch: branches directly on
  // the high-half/sign-extension comparison instead of materializing V.
  // Only applies when the If tests overflow/no_overflow.
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    // Map the requested overflow condition (VS/VC) onto NE/EQ of the
    // comparison above.
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
14772 
14773 // ============================================================================
14774 // Compare Instructions
14775 
// Signed 32-bit compare, register-register: flags = op1 - op2 (cmpw).
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed 32-bit compare against zero (special-cased immediate form).
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed 32-bit compare against an immediate that fits an ADD/SUB
// encoding, so a single cmpw instruction suffices.
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed 32-bit compare against an arbitrary immediate; costed higher
// because the constant may need to be materialized first.
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14831 
14832 // Unsigned compare Instructions; really, same as signed compare
14833 // except it should only be used to feed an If or a CMovI which takes a
14834 // cmpOpU.
14835 
// Unsigned 32-bit compare, register-register.  Same cmpw encoding as the
// signed form; the rFlagsRegU result type steers consumers to cmpOpU.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned 32-bit compare against zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned 32-bit compare against an ADD/SUB-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned 32-bit compare against an arbitrary immediate (may need a
// constant-materialization instruction, hence the doubled cost).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14891 
// Signed 64-bit compare, register-register.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed 64-bit compare against zero.
// NOTE(review): the format says "tst" but the encoder emits a
// compare-with-immediate-0 (aarch64_enc_cmp_imm_addsub) — confirm the
// intended disassembly text.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed 64-bit compare against an ADD/SUB-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed 64-bit compare against an arbitrary immediate.
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14947 
// Unsigned 64-bit compare, register-register; rFlagsRegU routes
// consumers to the unsigned condition codes.
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned 64-bit compare against zero.
// NOTE(review): format text "tst" vs cmp-with-0 encoder — same mismatch
// as compL_reg_immL0; verify intended disassembly text.
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned 64-bit compare against an ADD/SUB-encodable immediate.
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned 64-bit compare against an arbitrary immediate.
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15003 
// Pointer compare, register-register (unsigned flags — addresses have
// no sign).
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed-oop compare, register-register.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null-test: compare op1 against the null pointer.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed-oop null-test.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
15059 
15060 // FP comparisons
15061 //
15062 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
15063 // using normal cmpOp. See declaration of rFlagsReg for details.
15064 
// Single-precision FP compare, register-register (fcmps sets NZCV).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Single-precision FP compare against the 0.0 literal form of fcmps.
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
// FROM HERE

// Double-precision FP compare, register-register.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Double-precision FP compare against the 0.0 literal form of fcmpd.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
15121 
// Three-way float compare: dst = -1, 0, or +1 for src1 <, ==, > src2;
// unordered (NaN) yields -1 since LT holds after fcmps on unordered.
// Fixes: balanced the parenthesis in the format string and removed the
// unused `Label done` (it was bound but never branched to).
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
15149 
// Three-way double compare: dst = -1, 0, or +1 for src1 <, ==, > src2;
// unordered (NaN) yields -1 since LT holds after fcmpd on unordered.
// Fixes: balanced the parenthesis in the format string and removed the
// unused `Label done` (it was bound but never branched to).
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
15176 
// Three-way float compare against 0.0: dst = -1, 0, or +1; unordered
// (NaN) yields -1 since LT holds after fcmps on unordered.
// Fixes: balanced the parenthesis in the format string and removed the
// unused `Label done` (it was bound but never branched to).
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
15203 
// Three-way double compare against 0.0: dst = -1, 0, or +1; unordered
// (NaN) yields -1 since LT holds after fcmpd on unordered.
// Fixes: balanced the parenthesis in the format string and removed the
// unused `Label done` (it was bound but never branched to).
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
15229 
// CmpLTMask(p, q): dst = (p < q) ? -1 : 0, as a 32-bit mask.
// csetw gives 0/1 for LT, then 0 - dst turns 1 into -1.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask against zero: arithmetic shift right by 31 replicates the
// sign bit, producing -1 for negative src and 0 otherwise in one insn.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
15266 
15267 // ============================================================================
15268 // Max and Min
15269 
// Conditional-select helper (no match rule): dst = LT ? src1 : src2.
// Only instantiated via the expand rules of minI_rReg below.
instruct cmovI_reg_reg_lt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect( DEF dst, USE src1, USE src2, USE cr );

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 lt\t"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(icond_reg_reg);
%}

// MinI: expands to a signed compare plus LT conditional select.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
%{
  match(Set dst (MinI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_lt(dst, src1, src2, cr);
  %}

%}
// FROM HERE

// Conditional-select helper (no match rule): dst = GT ? src1 : src2.
// Only instantiated via the expand rule of maxI_rReg below.
instruct cmovI_reg_reg_gt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect( DEF dst, USE src1, USE src2, USE cr );

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 gt\t"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(icond_reg_reg);
%}

// MaxI: expands to a signed compare plus GT conditional select.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
%{
  match(Set dst (MaxI src1 src2));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_gt(dst, src1, src2, cr);
  %}
%}
15328 
15329 // ============================================================================
15330 // Branch Instructions
15331 
15332 // Direct Branch.
// Unconditional direct branch (Goto).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}

// Conditional Near Branch — signed/ordinary condition codes.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}

// Conditional Near Branch Unsigned — takes unsigned flags/conditions.
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
15388 
15389 // Make use of CBZ and CBNZ.  These instructions, as well as being
15390 // shorter than (cmp; branch), have the additional benefit of not
15391 // killing the flags.
15392 
// Compare-with-zero-and-branch, 32-bit: fuses (CmpI op1, 0) + If into a
// single cbzw/cbnzw, leaving the flags untouched.
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Compare-with-zero-and-branch, 64-bit (cbz/cbnz).
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Pointer null-check-and-branch (64-bit cbz/cbnz).
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Compressed-oop null-check-and-branch (32-bit cbzw/cbnzw).
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Null check on a DecodeN'd oop: a compressed oop is null iff its
// decoded pointer is, so test the narrow register directly.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned 32-bit compare-with-zero-and-branch.  For unsigned x,
// x <u 0 is impossible and x >=u 0 is always true, so EQ/LS map to cbzw
// and NE/HI to cbnzw.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned 64-bit compare-with-zero-and-branch (same mapping as above).
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15511 
15512 // Test bit and Branch
15513 
15514 // Patterns for short (< 32KiB) variants
// Sign test and branch, 64-bit: x < 0 iff bit 63 is set, so LT becomes
// tbnz(bit 63) and GE becomes tbz.  Short (< 32KiB) variant.
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Sign test and branch, 32-bit: tests bit 31.  Short variant.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Single-bit test and branch, 64-bit: (op1 & 2^k) ==/!= 0 becomes
// tbz/tbnz on bit k.  Predicate requires a power-of-2 mask.  Short variant.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Single-bit test and branch, 32-bit.  Short variant.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
15580 
15581 // And far variants
// Far variant of cmpL_branch_sign: same tbz/tbnz selection but the
// far=true argument lets the assembler emit an inverted test around an
// unconditional branch when the target is out of tb* range.
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of cmpI_branch_sign (bit 31).
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of cmpL_branch_bit (power-of-2 mask required).
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of cmpI_branch_bit.
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15643 
15644 // Test bits
15645 
// Masked-bits test, 64-bit immediate: (op1 & mask) compared to 0 becomes
// a single tst.  Predicate requires a valid logical-immediate encoding.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Masked-bits test, 32-bit immediate (tstw).
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Masked-bits test, 64-bit register mask.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Masked-bits test, 32-bit register mask.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
15693 
15694 
15695 // Conditional Far Branch
15696 // Conditional Far Branch Unsigned
15697 // TODO: fixme
15698 
15699 // counted loop end branch near
// Counted loop back-branch, near, signed conditions.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}

// Counted loop back-branch, near, unsigned conditions.
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
15732 
15733 // counted loop end branch far
15734 // counted loop end branch far unsigned
15735 // TODO: fixme
15736 
15737 // ============================================================================
15738 // inlined locking and unlocking
15739 
// Inlined monitor enter (biased/stack-lock fast path); result is the
// flags register tested by the following branch.  tmp and tmp2 are
// clobbered scratch registers.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}

// Inlined monitor exit fast path; mirrors cmpFastLock.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
15767 
15768 
15769 // ============================================================================
15770 // Safepoint Instructions
15771 
15772 // TODO
15773 // provide a near and far version of this code
15774 
// Safepoint poll: load from the polling page; the VM arms the page to
// fault here when a safepoint is requested.
instruct safePoint(rFlagsReg cr, iRegP poll)
%{
  match(SafePoint poll);
  effect(KILL cr);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
15788 
15789 
15790 // ============================================================================
15791 // Procedure Call/Return Instructions
15792 
15793 // Call Java Static Instruction
15794 
// Direct call to a statically-bound Java method.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15810 
15811 // TO HERE
15812 
15813 // Call Java Dynamic Instruction
// Dynamically-dispatched Java call (inline-cache form); otherwise
// parallel in structure to CallStaticJavaDirect above.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15829 
15830 // Call Runtime Instruction
15831 
// Call from compiled Java code into the VM runtime; no call epilog is
// needed (the runtime transition encoding handles the bookkeeping).
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15846 
// Call Runtime Leaf Instruction
15848 
// Leaf runtime call: the callee performs no safepoint and makes no
// upcalls back into Java.  Same encoding as CallRuntimeDirect.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15863 
// Call Runtime Leaf (no FP) Instruction
15865 
// Leaf runtime call that does not touch floating-point state
// (CallLeafNoFP ideal node); encoding is shared with CallLeafDirect.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15880 
15881 // Tail Call; Jump from runtime stub to Java code.
15882 // Also known as an 'interprocedural jump'.
15883 // Target of jump will eventually return to caller.
15884 // TailJump below removes the return address.
// Indirect tail call: branch to $jump_target while $method_ptr (the
// inline-cache register) carries the method for the callee's benefit.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_ptr)
%{
  match(TailCall jump_target method_ptr);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_ptr holds method" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
15897 
// Interprocedural jump carrying an exception oop in r0 (ex_oop);
// unlike TailCall, TailJump discards the return address (see the
// comment above TailCalljmpInd's ideal-node description).
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
15910 
15911 // Create exception oop: created by stack-crawling runtime code.
15912 // Created exception is now available to this handler, and is setup
15913 // just prior to jumping to this handler. No code emitted.
15914 // TODO check
15915 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Bind the already-materialized exception oop to r0 for this handler;
// the runtime placed it there before jumping here, so no code is
// emitted (size(0), empty encoding).
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
15928 
15929 // Rethrow exception: The exception oop will come in the first
15930 // argument position. Then JUMP (not call) to the rethrow stub code.
// Jump (not call) to the shared rethrow stub; the exception oop is
// already in the first argument register per the comment above.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
15941 
15942 
15943 // Return Instruction
15944 // epilog node loads ret address into lr as part of frame pop
// Method return; lr was already restored by the epilog's frame pop,
// so this emits only the ret instruction.
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
15955 
15956 // Die now.
// Halt node: stop the VM with the node's halt reason.  Nothing is
// emitted when the block is provably unreachable (is_reachable()).
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    if (is_reachable()) {
      __ stop(_halt_reason);
    }
  %}

  ins_pipe(pipe_class_default);
%}
15971 
15972 // ============================================================================
15973 // Partial Subtype Check
15974 //
15975 // superklass array for an instance of the superklass.  Set a hidden
15976 // internal cache on a hit (cache is checked with exposed code in
15977 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
15978 // encoding ALSO sets flags.
15979 
// Slow-path subtype check producing a result register (zero on hit,
// non-zero on miss); the encoding also sets the flags.  Fixed register
// bindings (r4/r0/r2/r5) match the shared stub's calling convention.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
15994 
// Fused form matching (CmpP (PartialSubtypeCheck ...) 0): only the
// flags are consumed, so the result register need not be zeroed on a
// hit (opcode 0x0) and is KILLed rather than defined.
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // same cost as the full partialSubtypeCheck above
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
16009 
// String comparison, both operands UTF-16 (UU encoding).  All inputs
// are USE_KILLed; tmp1/tmp2 and cr are clobbered.  The three fnoreg
// arguments mean no vector temps are needed for this encoding pair.
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16027 
// String comparison, both operands Latin-1 (LL encoding); same
// register discipline as string_compareU above.
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16044 
// Mixed-encoding comparison: str1 is UTF-16, str2 is Latin-1 (UL).
// Unlike the same-encoding variants, this one needs three vector
// temps (v0-v2), all KILLed.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16064 
// Mixed-encoding comparison: str1 is Latin-1, str2 is UTF-16 (LU);
// mirror image of string_compareUL above.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister,StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
16084 
// StrIndexOf with a variable-length needle, both strings UTF-16 (UU).
// The -1 passed as icnt2 tells the stub the needle length is in a
// register (cnt2) rather than a compile-time constant.
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16105 
// StrIndexOf, variable-length needle, both strings Latin-1 (LL);
// structure identical to string_indexofUU above.
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16126 
// StrIndexOf, variable-length needle, UTF-16 haystack / Latin-1
// needle (UL); structure identical to string_indexofUU above.
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16147 
// StrIndexOf with a small constant needle length (immI_le_4), both
// strings UTF-16.  The constant icnt2 is passed directly and cnt2's
// register slot is zr; fewer temps are needed than the variable form.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16168 
// Constant-needle StrIndexOf, both strings Latin-1; parallel to
// string_indexof_conUU above.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16189 
// Constant-needle StrIndexOf for the mixed UL case; note the needle
// length is restricted to exactly 1 (immI_1), unlike the UU/LL
// variants which accept up to 4 (immI_le_4).
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16210 
// StrIndexOfChar: find a single char $ch in a char array; no encoding
// predicate, so this matches all StrIndexOfChar nodes.
instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16228 
// StrEquals for Latin-1 strings; the trailing 1 passed to
// string_equals is the element size in bytes.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
16244 
// StrEquals for UTF-16 strings; element size 2 bytes (cf.
// string_equalsL above, which passes 1).
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
16260 
// AryEq for byte arrays (LL encoding); the trailing 1 to
// arrays_equals is the element size in bytes.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 1);
    %}
  ins_pipe(pipe_class_memory);
%}
16277 
// AryEq for char arrays (UU encoding); element size 2 bytes (cf.
// array_equalsB above).
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
16294 
// HasNegatives: scan a byte[] for any negative (high-bit-set) byte;
// result goes to r0, inputs ary1/len are consumed.
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (HasNegatives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "has negatives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
16305 
16306 // fast char[] to byte[] compression
// fast char[] to byte[] compression
// (StrCompressedCopy); uses four vector temps v0-v3 and consumes the
// src/dst/len inputs.  Result register reports the outcome.
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 tmp1, vRegD_V1 tmp2,
                         vRegD_V2 tmp3, vRegD_V3 tmp4,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Compress $src,$dst -> $result    // KILL R1, R2, R3, R4" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                           $tmp3$$FloatRegister, $tmp4$$FloatRegister,
                           $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
16324 
16325 // fast byte[] to char[] inflation
// fast byte[] to char[] inflation
// (StrInflatedCopy); produces no value (Universe dummy), clobbers
// three vector temps plus an integer temp, and consumes src/dst/len.
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
                        vRegD_V0 tmp1, vRegD_V1 tmp2, vRegD_V2 tmp3, iRegP_R3 tmp4, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                          $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16339 
16340 // encode char[] to byte[] in ISO_8859_1
// encode char[] to byte[] in ISO_8859_1
// (EncodeISOArray); clobbers vector temps v0-v3 and consumes
// src/dst/len; result is returned in r0.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
16358 
16359 // ============================================================================
16360 // This name is KNOWN by the ADLC and cannot be changed.
16361 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
16362 // for this guy.
// ThreadLocal: the current thread already lives in the dedicated
// thread register (thread_RegP), so this is a zero-size, zero-cost
// rename with no code emitted.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
16377 
16378 // ====================VECTOR INSTRUCTIONS=====================================
16379 
16380 // Load vector (32 bits)
// 32-bit vector load into the low lanes of a D register (ldr s-form).
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
16390 
16391 // Load vector (64 bits)
// 64-bit vector load into a D register (ldr d-form).
instruct loadV8(vecD dst, vmem8 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
16401 
16402 // Load Vector (128 bits)
// 128-bit vector load into a Q register (ldr q-form).
instruct loadV16(vecX dst, vmem16 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}
16412 
16413 // Store Vector (32 bits)
// 32-bit vector store from the low lanes of a D register (str s-form).
instruct storeV4(vecD src, vmem4 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
16423 
16424 // Store Vector (64 bits)
// 64-bit vector store from a D register (str d-form).
instruct storeV8(vecD src, vmem8 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
16434 
16435 // Store Vector (128 bits)
// 128-bit vector store from a Q register (str q-form).
instruct storeV16(vecX src, vmem16 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
16445 
// Broadcast a GP byte into each lane of a 64-bit vector (T8B dup).
// Also covers 4-byte vectors (length == 4 in the predicate).
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
16458 
// Broadcast a GP byte into each lane of a 128-bit vector (T16B dup).
instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16470 
// Broadcast an immediate byte (low 8 bits of the constant) into a
// 64-bit vector via movi; covers lengths 4 and 8.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
16483 
// Broadcast an immediate byte into a 128-bit vector via movi.
instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
16495 
// Broadcast a GP short into each 16-bit lane of a 64-bit vector
// (T4H dup); covers lengths 2 and 4.
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
16508 
// Broadcast a GP short into each 16-bit lane of a 128-bit vector.
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16520 
// Broadcast an immediate short (low 16 bits) into a 64-bit vector;
// covers lengths 2 and 4.
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
16533 
// Broadcast an immediate short into a 128-bit vector.
instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
16545 
// Broadcast a GP int into both 32-bit lanes of a 64-bit vector (T2S).
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
16557 
// Broadcast a GP int into all four 32-bit lanes of a 128-bit vector.
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16569 
// Broadcast an immediate int into a 64-bit vector via movi.
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
16581 
// Broadcast an immediate int into a 128-bit vector via movi.
instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
16593 
// Broadcast a GP long into both 64-bit lanes of a 128-bit vector.
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16605 
// Zero a 128-bit vector by EOR-ing it with itself (no constant load
// needed).  NOTE(review): despite the "2L" name this matches
// (ReplicateI zero) with an int-0 immediate, and the format text says
// "vector(4I)" — presumably an all-zero 128-bit register serves both
// the 2L and 4I shapes; confirm against the matcher's intent.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
16619 
// Broadcast a scalar float (held in an FP/SIMD register) into both S
// lanes of a 64-bit vector (dup Vd.2S, Vn.S[0]).
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}

// Broadcast a scalar float into all four S lanes of a 128-bit vector.
instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}

// Broadcast a scalar double into both D lanes of a 128-bit vector.
instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
16658 
16659 // ====================REDUCTION ARITHMETIC====================================
16660 
// Add-reduce a 2I vector into a scalar: extract both S lanes into GPRs
// with umov, then fold in the scalar accumulator isrc with two addw's.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, iRegINoSp tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $vsrc, S, 0\n\t"
            "umov  $tmp2, $vsrc, S, 1\n\t"
            "addw  $tmp, $isrc, $tmp\n\t"
            "addw  $dst, $tmp, $tmp2\t# add reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($vsrc$$reg), __ S, 1);
    __ addw($tmp$$Register, $isrc$$Register, $tmp$$Register);
    __ addw($dst$$Register, $tmp$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Add-reduce a 4I vector: addv sums all four S lanes into lane 0 of
// vtmp, which is then extracted and added to the scalar input.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, vecX vtmp, iRegINoSp itmp)
%{
  match(Set dst (AddReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP vtmp, TEMP itmp);
  format %{ "addv  $vtmp, T4S, $vsrc\n\t"
            "umov  $itmp, $vtmp, S, 0\n\t"
            "addw  $dst, $itmp, $isrc\t# add reduction4I"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($vtmp$$reg), __ T4S,
            as_FloatRegister($vsrc$$reg));
    __ umov($itmp$$Register, as_FloatRegister($vtmp$$reg), __ S, 0);
    __ addw($dst$$Register, $itmp$$Register, $isrc$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply-reduce a 2I vector: lane 0 * isrc, then * lane 1.
// dst is a TEMP as well because it is written before the last read of
// the vector source's second lane.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, iRegINoSp tmp)
%{
  match(Set dst (MulReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $vsrc, S, 0\n\t"
            "mul   $dst, $tmp, $isrc\n\t"
            "umov  $tmp, $vsrc, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t# mul reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $isrc$$Register);
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply-reduce a 4I vector: ins copies the high D half of vsrc down
// so a single 2S mulv pairs lanes {0*2, 1*3}; the two partial products
// are then extracted and multiplied with the scalar input.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, vecX vtmp, iRegINoSp itmp)
%{
  match(Set dst (MulReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP vtmp, TEMP itmp, TEMP dst);
  format %{ "ins   $vtmp, D, $vsrc, 0, 1\n\t"
            "mulv  $vtmp, T2S, $vtmp, $vsrc\n\t"
            "umov  $itmp, $vtmp, S, 0\n\t"
            "mul   $dst, $itmp, $isrc\n\t"
            "umov  $itmp, $vtmp, S, 1\n\t"
            "mul   $dst, $itmp, $dst\t# mul reduction4I"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($vtmp$$reg), __ D,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp$$reg), __ T2S,
            as_FloatRegister($vtmp$$reg), as_FloatRegister($vsrc$$reg));
    __ umov($itmp$$Register, as_FloatRegister($vtmp$$reg), __ S, 0);
    __ mul($dst$$Register, $itmp$$Register, $isrc$$Register);
    __ umov($itmp$$Register, as_FloatRegister($vtmp$$reg), __ S, 1);
    __ mul($dst$$Register, $itmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16741 
// Add-reduce a 2F vector with a scalar accumulator. The additions are
// performed strictly in order (fsrc+lane0, then +lane1) — FP addition
// is not associative, so the sequence must not be reordered.
instruct reduce_add2F(vRegF dst, vRegF fsrc, vecD vsrc, vecD tmp)
%{
  match(Set dst (AddReductionVF fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t# add reduction2F"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    // ins moves lane 1 into lane 0 of tmp so the scalar fadds can
    // consume it.
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Add-reduce a 4F vector: strictly ordered scalar additions of lanes
// 0..3 onto fsrc (IEEE ordering — do not replace with faddv/faddp).
instruct reduce_add4F(vRegF dst, vRegF fsrc, vecX vsrc, vecX tmp)
%{
  match(Set dst (AddReductionVF fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $vsrc, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $vsrc, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t# add reduction4F"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply-reduce a 2F vector; same in-order structure as
// reduce_add2F, using scalar fmuls.
instruct reduce_mul2F(vRegF dst, vRegF fsrc, vecD vsrc, vecD tmp)
%{
  match(Set dst (MulReductionVF fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t# mul reduction2F"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply-reduce a 4F vector; strictly ordered lane-by-lane scalar
// multiplies, mirroring reduce_add4F.
instruct reduce_mul4F(vRegF dst, vRegF fsrc, vecX vsrc, vecX tmp)
%{
  match(Set dst (MulReductionVF fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $vsrc, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $vsrc, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t# mul reduction4F"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16845 
// Add-reduce a 2D vector with a scalar accumulator; strictly ordered
// scalar faddd operations (FP addition is not associative).
instruct reduce_add2D(vRegD dst, vRegD dsrc, vecX vsrc, vecX tmp)
%{
  match(Set dst (AddReductionVD dsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $dsrc, $vsrc\n\t"
            "ins   $tmp, D, $vsrc, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t# add reduction2D"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dsrc$$reg), as_FloatRegister($vsrc$$reg));
    // Move lane 1 down to lane 0 of tmp for the scalar add.
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply-reduce a 2D vector; same structure with scalar fmuld.
instruct reduce_mul2D(vRegD dst, vRegD dsrc, vecX vsrc, vecX tmp)
%{
  match(Set dst (MulReductionVD dsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $dsrc, $vsrc\n\t"
            "ins   $tmp, D, $vsrc, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t# mul reduction2D"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16885 
// Max-reduce a 2F vector with a scalar input: fmaxs against lane 0
// (implicitly, via the low lane of vsrc), then against lane 1.
// The predicate keys on element type because Max/MinReductionV is a
// type-generic node.
instruct reduce_max2F(vRegF dst, vRegF fsrc, vecD vsrc, vecD tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxReductionV fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmaxs $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fmaxs $dst, $dst, $tmp\t# max reduction2F" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S, as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Max-reduce a 4F vector: fmaxv reduces all four lanes at once, then a
// scalar fmaxs folds in the scalar input.
instruct reduce_max4F(vRegF dst, vRegF fsrc, vecX vsrc) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxReductionV fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "fmaxv $dst, T4S, $vsrc\n\t"
            "fmaxs $dst, $dst, $fsrc\t# max reduction4F" %}
  ins_encode %{
    __ fmaxv(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($vsrc$$reg));
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($fsrc$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Max-reduce a 2D vector (no fmaxv for D lanes — pairwise via ins).
instruct reduce_max2D(vRegD dst, vRegD dsrc, vecX vsrc, vecX tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MaxReductionV dsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmaxd $dst, $dsrc, $vsrc\n\t"
            "ins   $tmp, D, $vsrc, 0, 1\n\t"
            "fmaxd $dst, $dst, $tmp\t# max reduction2D" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg), as_FloatRegister($dsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D, as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmaxd(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Min-reduce a 2F vector; mirror of reduce_max2F with fmins.
instruct reduce_min2F(vRegF dst, vRegF fsrc, vecD vsrc, vecD tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinReductionV fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmins $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fmins $dst, $dst, $tmp\t# min reduction2F" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S, as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Min-reduce a 4F vector; mirror of reduce_max4F with fminv/fmins.
instruct reduce_min4F(vRegF dst, vRegF fsrc, vecX vsrc) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinReductionV fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "fminv $dst, T4S, $vsrc\n\t"
            "fmins $dst, $dst, $fsrc\t# min reduction4F" %}
  ins_encode %{
    __ fminv(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($vsrc$$reg));
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($fsrc$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Min-reduce a 2D vector; mirror of reduce_max2D with fmind.
instruct reduce_min2D(vRegD dst, vRegD dsrc, vecX vsrc, vecX tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MinReductionV dsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmind $dst, $dsrc, $vsrc\n\t"
            "ins   $tmp, D, $vsrc, 0, 1\n\t"
            "fmind $dst, $dst, $tmp\t# min reduction2D" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg), as_FloatRegister($dsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D, as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmind(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16977 
16978 // ====================VECTOR ARITHMETIC=======================================
16979 
16980 // --------------------------------- ADD --------------------------------------
16981 
// Element-wise byte add, 64-bit vector. Also used for 4B vectors —
// the upper lanes compute garbage that callers never read.
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Element-wise byte add, 128-bit vector.
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Element-wise short add, 64-bit vector (also handles 2S vectors).
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Element-wise short add, 128-bit vector.
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Element-wise int add, 64-bit vector.
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Element-wise int add, 128-bit vector.
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Element-wise long add, 128-bit vector.
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Element-wise float add, 64-bit vector.
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// Element-wise float add, 128-bit vector.
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17109 
// Element-wise double add, 128-bit vector.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  // Length guard added for consistency with vsub2D/vmul2D; 2 is the
  // only possible double-vector length for a 128-bit NEON register.
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17122 
17123 // --------------------------------- SUB --------------------------------------
17124 
// Element-wise byte subtract, 64-bit vector (also covers 4B).
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Element-wise byte subtract, 128-bit vector.
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Element-wise short subtract, 64-bit vector (also covers 2S).
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Element-wise short subtract, 128-bit vector.
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Element-wise int subtract, 64-bit vector.
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Element-wise int subtract, 128-bit vector.
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Element-wise long subtract, 128-bit vector.
instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Element-wise float subtract, 64-bit vector.
instruct vsub2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// Element-wise float subtract, 128-bit vector.
instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

// Element-wise double subtract, 128-bit vector.
instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17266 
17267 // --------------------------------- MUL --------------------------------------
17268 
// Element-wise byte multiply, 64-bit vector (also covers 4B).
instruct vmul8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (MulVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Element-wise byte multiply, 128-bit vector.
instruct vmul16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (MulVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Element-wise short multiply, 64-bit vector (also covers 2S).
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Element-wise short multiply, 128-bit vector.
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Element-wise int multiply, 64-bit vector.
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Element-wise int multiply, 128-bit vector.
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Element-wise float multiply, 64-bit vector.
// Note: no MulVL rule — NEON has no 64x64->64 element multiply.
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// Element-wise float multiply, 128-bit vector.
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// Element-wise double multiply, 128-bit vector.
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17396 
17397 // --------------------------------- MLA --------------------------------------
17398 
// Fused multiply-accumulate for shorts: matches dst + src1*src2 and
// emits a single mla, 64-bit vector (also covers 2S).
instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Multiply-accumulate for shorts, 128-bit vector.
instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Multiply-accumulate for ints, 64-bit vector.
instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Multiply-accumulate for ints, 128-bit vector.
instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
17455 
17456 // dst + src1 * src2
17457 instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
17458   predicate(UseFMA && n->as_Vector()->length() == 2);
17459   match(Set dst (FmaVF  dst (Binary src1 src2)));
17460   format %{ "fmla  $dst,$src1,$src2\t# vector (2S)" %}
17461   ins_cost(INSN_COST);
17462   ins_encode %{
17463     __ fmla(as_FloatRegister($dst$$reg), __ T2S,
17464             as_FloatRegister($src1$$reg),
17465             as_FloatRegister($src2$$reg));
17466   %}
17467   ins_pipe(vmuldiv_fp64);
17468 %}
17469 
17470 // dst + src1 * src2
17471 instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
17472   predicate(UseFMA && n->as_Vector()->length() == 4);
17473   match(Set dst (FmaVF  dst (Binary src1 src2)));
17474   format %{ "fmla  $dst,$src1,$src2\t# vector (4S)" %}
17475   ins_cost(INSN_COST);
17476   ins_encode %{
17477     __ fmla(as_FloatRegister($dst$$reg), __ T4S,
17478             as_FloatRegister($src1$$reg),
17479             as_FloatRegister($src2$$reg));
17480   %}
17481   ins_pipe(vmuldiv_fp128);
17482 %}
17483 
17484 // dst + src1 * src2
17485 instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
17486   predicate(UseFMA && n->as_Vector()->length() == 2);
17487   match(Set dst (FmaVD  dst (Binary src1 src2)));
17488   format %{ "fmla  $dst,$src1,$src2\t# vector (2D)" %}
17489   ins_cost(INSN_COST);
17490   ins_encode %{
17491     __ fmla(as_FloatRegister($dst$$reg), __ T2D,
17492             as_FloatRegister($src1$$reg),
17493             as_FloatRegister($src2$$reg));
17494   %}
17495   ins_pipe(vmuldiv_fp128);
17496 %}
17497 
17498 // --------------------------------- MLS --------------------------------------
17499 
// Integer multiply-subtract: dst -= src1 * src2, matched as
// Set dst (SubV* dst (MulV* src1 src2)).  As with MLA, the D-register
// short form covers lengths 2 and 4.
instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// dst - src1 * src2
// Two match rules: an FMA with either multiplicand negated is a fused
// multiply-subtract, so both shapes map to a single fmls.
instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst - src1 * src2
instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst - src1 * src2
instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary (NegVD src1) src2)));
  match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17601 
17602 // --------------- Vector Multiply-Add Shorts into Integer --------------------
17603 
// MulAddVS2VI: pairwise dst[i] = src1[2i]*src2[2i] + src1[2i+1]*src2[2i+1],
// widening 8 shorts into 4 ints.  Implemented as two signed widening
// multiplies (presumably the T4H form takes the low halves and the T8H
// form the high halves via smull/smull2 -- TODO confirm against the
// macro assembler) followed by a pairwise add of the 4S results.
// dst is TEMP_DEF because it is written before all inputs are consumed.
// NOTE(review): the format string ends with a stray "\n\t" after the
// last line -- cosmetic only, affects debug printing.
instruct vmuladdS2I(vecX dst, vecX src1, vecX src2, vecX tmp) %{
  predicate(n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MulAddVS2VI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "smullv  $tmp, $src1, $src2\t# vector (4H)\n\t"
            "smullv  $dst, $src1, $src2\t# vector (8H)\n\t"
            "addpv   $dst, $tmp, $dst\t# vector (4S)\n\t" %}
  ins_encode %{
    __ smullv(as_FloatRegister($tmp$$reg), __ T4H,
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
    __ smullv(as_FloatRegister($dst$$reg), __ T8H,
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
    __ addpv(as_FloatRegister($dst$$reg), __ T4S,
             as_FloatRegister($tmp$$reg),
             as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17625 
17626 // --------------------------------- DIV --------------------------------------
17627 
// Floating-point vector division: DivVF/DivVD -> fdiv for 2S, 4S and 2D
// arrangements.
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17669 
17670 // --------------------------------- SQRT -------------------------------------
17671 
// Vector square root: SqrtVF/SqrtVD -> fsqrt.
// NOTE(review): the 2F variant is routed through vunop_fp64 while the
// 128-bit variants use vsqrt_fp128 -- presumably intentional pipe
// modelling; confirm against the pipe class definitions.
instruct vsqrt2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVF src));
  format %{ "fsqrt  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

instruct vsqrt4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SqrtVF src));
  format %{ "fsqrt  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}

instruct vsqrt2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVD src));
  format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}
17705 
17706 // --------------------------------- ABS --------------------------------------
17707 
// Vector absolute value.  Integer forms use the SIMD abs instruction
// (emitted via absr); FP forms use fabs.  D-register byte form also
// covers length 4 (predicate accepts 4 or 8).
instruct vabs8B(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AbsVB src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical64);
%}

instruct vabs16B(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AbsVB src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

instruct vabs4S(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVS src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (4H)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical64);
%}

instruct vabs8S(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AbsVS src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (8H)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

instruct vabs2I(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVI src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (2S)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical64);
%}

instruct vabs4I(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVI src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

instruct vabs2L(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVL src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

// FP abs costs more than the integer forms (INSN_COST * 3).
instruct vabs2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

instruct vabs4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

instruct vabs2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
17831 
17832 // --------------------------------- NEG --------------------------------------
17833 
// Floating-point vector negation: NegVF/NegVD -> fneg.
instruct vneg2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

instruct vneg4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

instruct vneg2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
17872 
17873 // --------------------------------- AND --------------------------------------
17874 
// Bitwise AND.  Logical ops are element-size agnostic, so the
// predicate keys on length_in_bytes; the D form also handles
// 4-byte vectors.
instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17903 
17904 // --------------------------------- OR ---------------------------------------
17905 
// Bitwise OR, 64-bit vector (also covers 4-byte vectors via predicate).
// Fix: format string previously printed "and"; the rule matches OrV and
// emits orr, and the 16B variant below already prints "orr".
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src1$$reg),
           as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
17920 
// Bitwise OR, 128-bit vector.
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17934 
17935 // --------------------------------- XOR --------------------------------------
17936 
// Bitwise XOR (XorV -> eor); same length_in_bytes predicate scheme as
// the AND/OR rules.
instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17965 
17966 // ------------------------------ Shift ---------------------------------------
// Materialize a vector shift count by replicating the scalar count
// into every byte lane (dup).  Both LShiftCntV and RShiftCntV map here;
// the sshl/ushl rules below only consume the low byte of each lane.
instruct vshiftcnt8B(vecD dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct vshiftcnt16B(vecX dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
17988 
// Variable left shift of byte lanes: LShiftVB -> sshl (positive counts
// shift left).  D form covers lengths 4 and 8.
instruct vsll8B(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
18015 
18016 // Right shifts with vector shift count on aarch64 SIMD are implemented
18017 // as left shift by negative shift count.
18018 // There are two cases for vector shift count.
18019 //
18020 // Case 1: The vector shift count is from replication.
18021 //        |            |
18022 //    LoadVector  RShiftCntV
18023 //        |       /
18024 //     RShiftVI
18025 // Note: In inner loop, multiple neg instructions are used, which can be
18026 // moved to outer loop and merge into one neg instruction.
18027 //
18028 // Case 2: The vector shift count is from loading.
18029 // This case isn't supported by middle-end now. But it's supported by
18030 // panama/vectorIntrinsics(JEP 338: Vector API).
18031 //        |            |
18032 //    LoadVector  LoadVector
18033 //        |       /
18034 //     RShiftVI
18035 //
18036 
// Variable right shifts of byte lanes (see the comment block above:
// implemented as a left shift by a negated count).  negr negates every
// byte of the count vector; sshl/ushl read only the (signed) low byte
// of each lane, so this yields the required negative count.
instruct vsra8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

instruct vsra16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

// Unsigned (logical) right shift: same negate-then-shift scheme with ushl.
instruct vsrl8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

instruct vsrl16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
18106 
// Immediate shifts of byte lanes.  Counts >= 8 are out of range for
// the hardware encodings, so they are handled explicitly:
//  - left / unsigned right: result is all zeros (eor dst,src,src);
//  - signed right: clamp to 7 so every lane fills with its sign bit.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;   // arithmetic shift saturates at width-1
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;   // arithmetic shift saturates at width-1
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}

instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
18213 
// Variable shifts of short (16-bit) lanes.  Right shifts again negate
// the count first; negr always uses the byte arrangement (T8B/T16B)
// covering the whole register -- sshl/ushl only consume the low byte
// of each 16-bit lane, so bytewise negation is sufficient.
instruct vsll4S(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

instruct vsra4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

instruct vsra8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

instruct vsrl4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

instruct vsrl8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
18310 
// Left shift of 4 (or 2) short lanes by an immediate count.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // A left shift by >= the 16-bit lane width yields zero, and SHL
      // cannot encode such a count: clear dst with EOR dst,src,src.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
18330 
// Left shift of 8 short lanes by an immediate count (128-bit form of
// vsll4S_imm).
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Shift by >= lane width yields zero; SHL cannot encode the count,
      // so zero dst via EOR dst,src,src.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
18349 
// Arithmetic right shift of 4 (or 2) short lanes by an immediate count.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // An arithmetic shift by >= 16 gives the same result (all sign bits)
    // as a shift by 15, the largest count SSHR encodes for 16-bit lanes.
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
18364 
// Arithmetic right shift of 8 short lanes by an immediate count (128-bit
// form of vsra4S_imm).
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // Clamp: an arithmetic shift by >= 16 equals a shift by 15 (all sign
    // bits), which is the largest encodable SSHR count for 16-bit lanes.
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
18378 
// Logical (unsigned) right shift of 4 (or 2) short lanes by an immediate
// count.
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // A logical shift by >= the 16-bit lane width yields zero; zero dst
      // via EOR dst,src,src since USHR cannot encode the count.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
18398 
// Logical (unsigned) right shift of 8 short lanes by an immediate count
// (128-bit form of vsrl4S_imm).
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Shift by >= lane width yields zero; USHR cannot encode it, so
      // zero dst via EOR dst,src,src.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
18417 
// Left shift of 2 int lanes by per-lane, variable counts in a vector
// register.  SSHL is used directly: left shift is identical for signed
// and unsigned operands.
instruct vsll2I(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
18430 
// Left shift of 4 int lanes by per-lane, variable counts (128-bit form
// of vsll2I).
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
18443 
// Arithmetic right shift of 2 int lanes by per-lane, variable counts:
// negate the counts into tmp, then SSHL (a negative SSHL count shifts
// right, preserving the sign).
instruct vsra2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  // tmp holds the negated counts.
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    // Byte-wise negate: SSHL reads only the signed low byte of each
    // element as the shift amount.
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
18460 
// Arithmetic right shift of 4 int lanes by per-lane, variable counts
// (128-bit form of vsra2I): negate counts, then SSHL.
instruct vsra4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  // tmp holds the negated counts.
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
18477 
// Logical (unsigned) right shift of 2 int lanes by per-lane, variable
// counts: negate the counts into tmp, then USHL (negative count shifts
// right, zero-filling).
instruct vsrl2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  // tmp holds the negated counts.
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
18494 
// Logical (unsigned) right shift of 4 int lanes by per-lane, variable
// counts (128-bit form of vsrl2I): negate counts, then USHL.
instruct vsrl4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  // tmp holds the negated counts.
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
18511 
// Left shift of 2 int lanes by an immediate count.  No out-of-range
// handling (cf. the short variants): counts for 32-bit lanes are assumed
// already in 0..31, which SHL encodes directly.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
18524 
// Left shift of 4 int lanes by an immediate count (128-bit form of
// vsll2I_imm); counts are assumed in the encodable range 0..31.
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18537 
// Arithmetic right shift of 2 int lanes by an immediate count (assumed
// in 0..31 — no clamping needed, cf. the short variants).
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
18550 
// Arithmetic right shift of 4 int lanes by an immediate count (128-bit
// form of vsra2I_imm); counts are assumed in 0..31.
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18563 
// Logical (unsigned) right shift of 2 int lanes by an immediate count
// (assumed in 0..31 — no zeroing path needed, cf. the short variants).
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
18576 
// Logical (unsigned) right shift of 4 int lanes by an immediate count
// (128-bit form of vsrl2I_imm); counts are assumed in 0..31.
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18589 
// Left shift of 2 long lanes by per-lane, variable counts.  SSHL is used
// directly (left shift is the same for signed and unsigned operands).
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
18602 
// Arithmetic right shift of 2 long lanes by per-lane, variable counts:
// negate the counts into tmp, then SSHL (negative count shifts right,
// preserving sign).
instruct vsra2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  // tmp holds the negated counts.
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    // Byte-wise negate: SSHL reads only the signed low byte of each
    // element as the shift amount.
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
18619 
// Logical (unsigned) right shift of 2 long lanes by per-lane, variable
// counts: negate the counts into tmp, then USHL (negative count shifts
// right, zero-filling).
instruct vsrl2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  // tmp holds the negated counts.
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
18636 
// Left shift of 2 long lanes by an immediate count (assumed in 0..63,
// which SHL encodes directly — no out-of-range path needed).
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18649 
// Arithmetic right shift of 2 long lanes by an immediate count (assumed
// in 0..63 — no clamping needed, cf. the short variants).
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18662 
// Logical (unsigned) right shift of 2 long lanes by an immediate count
// (assumed in 0..63 — no zeroing path needed, cf. the short variants).
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18675 
// Vector max of 2 float lanes via NEON FMAX, which returns NaN when
// either input is NaN and orders -0.0 below +0.0.
instruct vmax2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (2F)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
18689 
// Vector max of 4 float lanes via NEON FMAX (128-bit form of vmax2F).
instruct vmax4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
18703 
// Vector max of 2 double lanes via NEON FMAX (NaN-propagating,
// -0.0 < +0.0).
instruct vmax2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
18717 
// Vector min of 2 float lanes via NEON FMIN, which returns NaN when
// either input is NaN and orders -0.0 below +0.0.
instruct vmin2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (2F)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
18731 
// Vector min of 4 float lanes via NEON FMIN (128-bit form of vmin2F).
instruct vmin4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
18745 
// Vector min of 2 double lanes via NEON FMIN (NaN-propagating,
// -0.0 < +0.0).
instruct vmin2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
18759 
// Round 2 double lanes according to a compile-time rounding-mode
// constant: rint -> FRINTN (to nearest, ties to even), floor -> FRINTM
// (toward -inf), ceil -> FRINTP (toward +inf).  Only these three rmode
// values are handled; any other constant emits nothing.
instruct vround2D_reg(vecX dst, vecX src, immI rmode) %{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (RoundDoubleModeV src rmode));
  format %{ "frint  $dst, $src, $rmode" %}
  ins_encode %{
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        __ frintn(as_FloatRegister($dst$$reg), __ T2D,
                  as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_floor:
        __ frintm(as_FloatRegister($dst$$reg), __ T2D,
                  as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_ceil:
        __ frintp(as_FloatRegister($dst$$reg), __ T2D,
                  as_FloatRegister($src$$reg));
        break;
    }
  %}
  ins_pipe(vdop_fp128);
%}
18782 
// Population count of each of 4 int lanes.  CNT only counts bits per
// byte, so two widening pairwise adds (UADDLP) accumulate the byte
// counts up to per-32-bit-lane totals: bytes -> halfwords -> words.
instruct vpopcount4I(vecX dst, vecX src) %{
  predicate(UsePopCountInstruction && n->as_Vector()->length() == 4);
  match(Set dst (PopCountVI src));
  format %{
    "cnt     $dst, $src\t# vector (16B)\n\t"
    "uaddlp  $dst, $dst\t# vector (16B)\n\t"
    "uaddlp  $dst, $dst\t# vector (8H)"
  %}
  ins_encode %{
     __ cnt(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg));
     __ uaddlp(as_FloatRegister($dst$$reg), __ T16B,
               as_FloatRegister($dst$$reg));
     __ uaddlp(as_FloatRegister($dst$$reg), __ T8H,
               as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
18801 
// Population count of each of 2 int lanes (64-bit form of vpopcount4I):
// CNT per byte, then two widening pairwise adds to per-word totals.
instruct vpopcount2I(vecD dst, vecD src) %{
  predicate(UsePopCountInstruction && n->as_Vector()->length() == 2);
  match(Set dst (PopCountVI src));
  format %{
    "cnt     $dst, $src\t# vector (8B)\n\t"
    "uaddlp  $dst, $dst\t# vector (8B)\n\t"
    "uaddlp  $dst, $dst\t# vector (4H)"
  %}
  ins_encode %{
     __ cnt(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg));
     __ uaddlp(as_FloatRegister($dst$$reg), __ T8B,
               as_FloatRegister($dst$$reg));
     __ uaddlp(as_FloatRegister($dst$$reg), __ T4H,
               as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
18820 
18821 //----------PEEPHOLE RULES-----------------------------------------------------
18822 // These must follow all instruction definitions as they use the names
18823 // defined in the instructions definitions.
18824 //
18825 // peepmatch ( root_instr_name [preceding_instruction]* );
18826 //
18827 // peepconstraint %{
18828 // (instruction_number.operand_name relational_op instruction_number.operand_name
18829 //  [, ...] );
18830 // // instruction numbers are zero-based using left to right order in peepmatch
18831 //
18832 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
18833 // // provide an instruction_number.operand_name for each operand that appears
18834 // // in the replacement instruction's match rule
18835 //
18836 // ---------VM FLAGS---------------------------------------------------------
18837 //
18838 // All peephole optimizations can be turned off using -XX:-OptoPeephole
18839 //
18840 // Each peephole rule is given an identifying number starting with zero and
18841 // increasing by one in the order seen by the parser.  An individual peephole
18842 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
18843 // on the command-line.
18844 //
18845 // ---------CURRENT LIMITATIONS----------------------------------------------
18846 //
18847 // Only match adjacent instructions in same basic block
18848 // Only equality constraints
18849 // Only constraints between operands, not (0.dest_reg == RAX_enc)
18850 // Only one replacement instruction
18851 //
18852 // ---------EXAMPLE----------------------------------------------------------
18853 //
18854 // // pertinent parts of existing instructions in architecture description
18855 // instruct movI(iRegINoSp dst, iRegI src)
18856 // %{
18857 //   match(Set dst (CopyI src));
18858 // %}
18859 //
18860 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
18861 // %{
18862 //   match(Set dst (AddI dst src));
18863 //   effect(KILL cr);
18864 // %}
18865 //
18866 // // Change (inc mov) to lea
18867 // peephole %{
//   // increment preceded by register-register move
18869 //   peepmatch ( incI_iReg movI );
18870 //   // require that the destination register of the increment
18871 //   // match the destination register of the move
18872 //   peepconstraint ( 0.dst == 1.dst );
18873 //   // construct a replacement instruction that sets
18874 //   // the destination to ( move's source register + one )
18875 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
18876 // %}
18877 //
18878 
18879 // Implementation no longer uses movX instructions since
18880 // machine-independent system no longer uses CopyX nodes.
18881 //
18882 // peephole
18883 // %{
18884 //   peepmatch (incI_iReg movI);
18885 //   peepconstraint (0.dst == 1.dst);
18886 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18887 // %}
18888 
18889 // peephole
18890 // %{
18891 //   peepmatch (decI_iReg movI);
18892 //   peepconstraint (0.dst == 1.dst);
18893 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18894 // %}
18895 
18896 // peephole
18897 // %{
18898 //   peepmatch (addI_iReg_imm movI);
18899 //   peepconstraint (0.dst == 1.dst);
18900 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18901 // %}
18902 
18903 // peephole
18904 // %{
18905 //   peepmatch (incL_iReg movL);
18906 //   peepconstraint (0.dst == 1.dst);
18907 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18908 // %}
18909 
18910 // peephole
18911 // %{
18912 //   peepmatch (decL_iReg movL);
18913 //   peepconstraint (0.dst == 1.dst);
18914 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18915 // %}
18916 
18917 // peephole
18918 // %{
18919 //   peepmatch (addL_iReg_imm movL);
18920 //   peepconstraint (0.dst == 1.dst);
18921 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18922 // %}
18923 
18924 // peephole
18925 // %{
18926 //   peepmatch (addP_iReg_imm movP);
18927 //   peepconstraint (0.dst == 1.dst);
18928 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
18929 // %}
18930 
18931 // // Change load of spilled value to only a spill
18932 // instruct storeI(memory mem, iRegI src)
18933 // %{
18934 //   match(Set mem (StoreI mem src));
18935 // %}
18936 //
18937 // instruct loadI(iRegINoSp dst, memory mem)
18938 // %{
18939 //   match(Set dst (LoadI mem));
18940 // %}
18941 //
18942 
18943 //----------SMARTSPILL RULES---------------------------------------------------
18944 // These must follow all instruction definitions as they use the names
18945 // defined in the instructions definitions.
18946 
18947 // Local Variables:
18948 // mode: c++
18949 // End: