1 //
    2 // Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
    3 // Copyright (c) 2014, 2020, Red Hat, Inc. All rights reserved.
    4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    5 //
    6 // This code is free software; you can redistribute it and/or modify it
    7 // under the terms of the GNU General Public License version 2 only, as
    8 // published by the Free Software Foundation.
    9 //
   10 // This code is distributed in the hope that it will be useful, but WITHOUT
   11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   13 // version 2 for more details (a copy is included in the LICENSE file that
   14 // accompanied this code).
   15 //
   16 // You should have received a copy of the GNU General Public License version
   17 // 2 along with this work; if not, write to the Free Software Foundation,
   18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   19 //
   20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   21 // or visit www.oracle.com if you need additional information or have any
   22 // questions.
   23 //
   24 //
   25 
   26 // AArch64 Architecture Description File
   27 
   28 //----------REGISTER DEFINITION BLOCK------------------------------------------
   29 // This information is used by the matcher and the register allocator to
   30 // describe individual registers and classes of registers within the target
// architecture.
   32 
   33 register %{
   34 //----------Architecture Description Register Definitions----------------------
   35 // General Registers
   36 // "reg_def"  name ( register save type, C convention save type,
   37 //                   ideal register type, encoding );
   38 // Register Save Types:
   39 //
   40 // NS  = No-Save:       The register allocator assumes that these registers
   41 //                      can be used without saving upon entry to the method, &
   42 //                      that they do not need to be saved at call sites.
   43 //
   44 // SOC = Save-On-Call:  The register allocator assumes that these registers
   45 //                      can be used without saving upon entry to the method,
   46 //                      but that they must be saved at call sites.
   47 //
   48 // SOE = Save-On-Entry: The register allocator assumes that these registers
   49 //                      must be saved before using them upon entry to the
   50 //                      method, but they do not need to be saved at call
   51 //                      sites.
   52 //
   53 // AS  = Always-Save:   The register allocator assumes that these registers
   54 //                      must be saved before using them upon entry to the
   55 //                      method, & that they must be saved at call sites.
   56 //
   57 // Ideal Register Type is used to determine how to save & restore a
   58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
   59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
   60 //
   61 // The encoding number is the actual bit-pattern placed into the opcodes.
   62 
   63 // We must define the 64 bit int registers in two 32 bit halves, the
   64 // real lower register and a virtual upper half register. upper halves
   65 // are used by the register allocator but are not actually supplied as
   66 // operands to memory ops.
   67 //
   68 // follow the C1 compiler in making registers
   69 //
   70 //   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
   72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
   73 //
   74 // as regards Java usage. we don't use any callee save registers
   75 // because this makes it difficult to de-optimise a frame (see comment
   76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
   77 //
   78 
   79 // General Registers
   80 
// 64-bit integer registers r0-r31.  Each is described as a real low
// 32-bit half (Rn) and a virtual high half (Rn_H) that share one
// encoding number; only the low half is ever supplied as a memory
// operand (see header above).  First save-type column is the Java
// (allocator) convention, second is the C calling convention.
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
// r8 and r9 are deliberately absent: they are kept invisible to the
// allocator so they can be used as scratch registers (see note above).
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
// r19-r26: SOC for Java (no callee saves, eases deoptimization — see
// note above), SOE for the C convention.
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// Fixed-role registers; note r31 maps to the stack pointer (r31_sp).
reg_def R27     ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
  141 
  142 // ----------------------------
  143 // Float/Double Registers
  144 // ----------------------------
  145 
  146 // Double Registers
  147 
  148 // The rules of ADL require that double registers be defined in pairs.
  149 // Each pair must be two 32-bit values, but not necessarily a pair of
  150 // single float registers. In each pair, ADLC-assigned register numbers
  151 // must be adjacent, with the lower number even. Finally, when the
  152 // CPU stores such a register pair to memory, the word associated with
  153 // the lower ADLC-assigned number must be stored to the lower address.
  154 
// AArch64 has 32 128-bit floating-point/SIMD registers. Each can store
// a vector of single or double precision floating-point values: up to
// 4 * 32 bit floats or 2 * 64 bit doubles.  We currently only use the
// first float or double element of the vector.
  159 
// For Java use, float registers v0-v15 are always save-on-call (whereas
// the platform ABI treats v8-v15 as callee-save).  Float registers
// v16-v31 are SOC as per the platform spec.
  163 
  // 128-bit SIMD/FP registers v0-v31.  Each is described as four
  // 32-bit slices sharing one encoding: Vn (VMReg slot 0), Vn_H
  // (slot 1), Vn_J (slot 2) and Vn_K (slot 3).
  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  // v8-v15 are callee-save in the platform ABI but are still declared
  // SOC here for Java use (see note above the V register section).
  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  // v16-v31 are SOC in both the Java and platform conventions.
  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
  323 
  324 // ----------------------------
  325 // Special Registers
  326 // ----------------------------
  327 
// The AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. The FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
  333 
// Flag register: encoding 32, no backing VMReg (VMRegImpl::Bad()) —
// it is not addressable as an instruction operand (see note above).
reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
  335 
  336 
  337 // Specify priority of register selection within phases of register
  338 // allocation.  Highest priority is first.  A useful heuristic is to
  339 // give registers a low priority when they are required by machine
  340 // instructions, like EAX and EDX on I486, and choose no-save registers
  341 // before save-on-call, & save-on-call before save-on-entry.  Registers
  342 // which participate in fixed calling sequences should come last.
  343 // Registers which are used as pairs must fall on an even boundary.
  344 
// Integer-register allocation order (highest priority first, per the
// comment above): scratch volatiles, then argument registers, then
// callee-saved, with the fixed-role registers listed last.
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);
  385 
// FP/SIMD-register allocation order: no-save registers (v16-v31)
// first, then argument registers (v0-v7), then the ABI callee-saved
// group (v8-v15) last.
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
  426 
// The flag register lives in its own allocation chunk.
alloc_class chunk2(RFLAGS);
  428 
  429 //----------Architecture Description Register Classes--------------------------
  430 // Several register classes are automatically defined based upon information in
  431 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
  436 //
  437 
// Class for all 32 bit general purpose registers, in encoding order.
// Note: includes the fixed-role registers r27-r31 (heapbase, thread,
// fp, lr, sp); the narrower classes below exclude them as needed.
reg_class all_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30,
    R31
);
  471 
  472 
// Class for all 32 bit integer registers (excluding SP which
// will never be used as an integer register).
// NOTE(review): _ANY_REG32_mask is computed at runtime and defined
// elsewhere in this file — not visible in this section.
reg_class any_reg32 %{
  return _ANY_REG32_mask;
%}
  478 
// Singleton classes that pin an operand to one specific int register.

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);

// Singleton class for R31 int register
reg_class int_r31_reg(R31);
  493 
// Class for all 64 bit general purpose registers (both halves of each
// register).  As with all_reg32, the fixed-role registers r27-r31 are
// included here and filtered out by the narrower classes below.
reg_class all_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
  527 
// The %{ ... %} classes below return register masks computed at
// runtime; the _*_mask values they reference are defined elsewhere in
// this file (not visible in this section).

// Class for all long integer registers (including SP)
reg_class any_reg %{
  return _ANY_REG_mask;
%}

// Class for non-allocatable 32 bit registers
reg_class non_allocatable_reg32(
    R28,                        // thread
    R30,                        // lr
    R31                         // sp
);

// Class for non-allocatable 64 bit registers
reg_class non_allocatable_reg(
    R28, R28_H,                 // thread
    R30, R30_H,                 // lr
    R31, R31_H                  // sp
);

// Class for all non-special integer registers
reg_class no_special_reg32 %{
  return _NO_SPECIAL_REG32_mask;
%}

// Class for all non-special long integer registers
reg_class no_special_reg %{
  return _NO_SPECIAL_REG_mask;
%}
  556 
// Singleton classes that pin an operand to one specific 64-bit
// register (both halves), plus the pointer-register mask classes.

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register (r12)
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register (r27)
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register (r28)
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register (r29)
reg_class fp_reg(
    R29, R29_H
);

// Class for link register (r30)
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register (r31)
reg_class sp_reg(
  R31, R31_H
);

// Class for all pointer registers
// NOTE(review): mask computed at runtime; defined elsewhere in the file.
reg_class ptr_reg %{
  return _PTR_REG_mask;
%}

// Class for all non_special pointer registers
reg_class no_special_ptr_reg %{
  return _NO_SPECIAL_PTR_REG_mask;
%}
  636 
// Class for all float registers.  Only the low 32-bit slice (Vn) of
// each vector register is listed, matching the "first element only"
// usage described above.
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
  672 
// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers: the low two 32-bit slices (Vn, Vn_H)
// of each vector register.
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
  710 
// Class for all 64bit vector registers: the low 64 bits (Vn, Vn_H) of
// each vector register.
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
  746 
// Class for all 128bit vector registers: all four 32-bit slices
// (Vn, Vn_H, Vn_J, Vn_K) of each vector register.
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
  782 
// Singleton classes that pin an operand to one specific v register.
// NOTE(review): the per-class comments say "128 bit register" but each
// class lists only the low two 32-bit slices (Vn, Vn_H).

// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Class for 128 bit register v4
reg_class v4_reg(
    V4, V4_H
);

// Class for 128 bit register v5
reg_class v5_reg(
    V5, V5_H
);

// Class for 128 bit register v6
reg_class v6_reg(
    V6, V6_H
);

// Class for 128 bit register v7
reg_class v7_reg(
    V7, V7_H
);

// Class for 128 bit register v8
reg_class v8_reg(
    V8, V8_H
);

// Class for 128 bit register v9
reg_class v9_reg(
    V9, V9_H
);

// Class for 128 bit register v10
reg_class v10_reg(
    V10, V10_H
);

// Class for 128 bit register v11
reg_class v11_reg(
    V11, V11_H
);

// Class for 128 bit register v12
reg_class v12_reg(
    V12, V12_H
);

// Class for 128 bit register v13
reg_class v13_reg(
    V13, V13_H
);

// Class for 128 bit register v14
reg_class v14_reg(
    V14, V14_H
);

// Class for 128 bit register v15
reg_class v15_reg(
    V15, V15_H
);

// Class for 128 bit register v16
reg_class v16_reg(
    V16, V16_H
);

// Class for 128 bit register v17
reg_class v17_reg(
    V17, V17_H
);

// Class for 128 bit register v18
reg_class v18_reg(
    V18, V18_H
);

// Class for 128 bit register v19
reg_class v19_reg(
    V19, V19_H
);

// Class for 128 bit register v20
reg_class v20_reg(
    V20, V20_H
);

// Class for 128 bit register v21
reg_class v21_reg(
    V21, V21_H
);

// Class for 128 bit register v22
reg_class v22_reg(
    V22, V22_H
);

// Class for 128 bit register v23
reg_class v23_reg(
    V23, V23_H
);

// Class for 128 bit register v24
reg_class v24_reg(
    V24, V24_H
);

// Class for 128 bit register v25
reg_class v25_reg(
    V25, V25_H
);

// Class for 128 bit register v26
reg_class v26_reg(
    V26, V26_H
);

// Class for 128 bit register v27
reg_class v27_reg(
    V27, V27_H
);

// Class for 128 bit register v28
reg_class v28_reg(
    V28, V28_H
);

// Class for 128 bit register v29
reg_class v29_reg(
    V29, V29_H
);

// Class for 128 bit register v30
reg_class v30_reg(
    V30, V30_H
);
  937 
  938 // Class for 128 bit register v31
  939 reg_class v31_reg(
  940     V31, V31_H
  941 );
  942 
  943 // Singleton class for condition codes
  944 reg_class int_flags(RFLAGS);
  945 
  946 %}
  947 
  948 //----------DEFINITION BLOCK---------------------------------------------------
  949 // Define name --> value mappings to inform the ADLC of an integer valued name
  950 // Current support includes integer values in the range [0, 0x7FFFFFFF]
  951 // Format:
  952 //        int_def  <name>         ( <int_value>, <expression>);
  953 // Generated Code in ad_<arch>.hpp
  954 //        #define  <name>   (<expression>)
  955 //        // value == <int_value>
  956 // Generated code in ad_<arch>.cpp adlc_verification()
  957 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
  958 //
  959 
  960 // we follow the ppc-aix port in using a simple cost model which ranks
  961 // register operations as cheap, memory ops as more expensive and
  962 // branches as most expensive. the first two have a low as well as a
  963 // normal cost. huge cost appears to be a way of saying don't do
  964 // something
  965 
definitions %{
  // Relative costs used by instruction selection, ranked as described
  // above: register ops cheapest, branches/calls twice that, and
  // volatile references an order of magnitude more expensive.
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
  973 
  974 
  975 //----------SOURCE BLOCK-------------------------------------------------------
  976 // This is a block of C++ code which provides values, functions, and
  977 // definitions necessary in the rest of the architecture description
  978 
  979 source_hpp %{
  980 
  981 #include "asm/macroAssembler.hpp"
  982 #include "gc/shared/cardTable.hpp"
  983 #include "gc/shared/cardTableBarrierSet.hpp"
  984 #include "gc/shared/collectedHeap.hpp"
  985 #include "opto/addnode.hpp"
  986 #include "opto/convertnode.hpp"
  987 
  988 extern RegMask _ANY_REG32_mask;
  989 extern RegMask _ANY_REG_mask;
  990 extern RegMask _PTR_REG_mask;
  991 extern RegMask _NO_SPECIAL_REG32_mask;
  992 extern RegMask _NO_SPECIAL_REG_mask;
  993 extern RegMask _NO_SPECIAL_PTR_REG_mask;
  994 
// No-op implementation: this port emits no call trampoline stubs, so
// both the size and relocation-count hooks report zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
 1012 
// Sizing and emission hooks for the exception and deopt handler
// stubs (emit_* are defined elsewhere in this file's source block).
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // the exception handler is a single far branch
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // NOTE(review): 4 instruction slots are reserved here, i.e. the far
    // branch is budgeted at up to 3 instructions -- confirm this stays
    // in sync with MacroAssembler::far_branch_size()
    return 4 * NativeInstruction::instruction_size;
  }
};
 1029 
// Platform-dependent extension point for per-node flags; this port
// defines no extra flags beyond the shared set.
class Node::PD {
public:
  enum NodeFlags {
    _last_flag = Node::_last_flag
  };
};
 1036 
 // true iff opcode is one of the CAS-style LoadStore opcodes; when
 // maybe_volatile is true the CompareAndExchangeX and weak variants
 // are accepted as well (see the definition in the source block)
 bool is_CAS(int opcode, bool maybe_volatile);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);
 1055 %}
 1056 
 1057 source %{
 1058 
 1059   // Derived RegMask with conditionally allocatable registers
 1060 
  // Platform hook run after matching; no AArch64-specific analysis is
  // performed.
  void PhaseOutput::pd_perform_mach_node_analysis() {
  }

  // All machine nodes on this port require only unit alignment.
  int MachNode::pd_alignment_required() const {
    return 1;
  }

  // No machine nodes on this port require padding before emission.
  int MachNode::compute_padding(int current_offset) const {
    return 0;
  }
 1071 
  // Definitions of the RegMask objects declared extern in the
  // source_hpp block above; they are populated in reg_mask_init().
  RegMask _ANY_REG32_mask;
  RegMask _ANY_REG_mask;
  RegMask _PTR_REG_mask;
  RegMask _NO_SPECIAL_REG32_mask;
  RegMask _NO_SPECIAL_REG_mask;
  RegMask _NO_SPECIAL_PTR_REG_mask;
 1078 
  void reg_mask_init() {
    // We derive below RegMask(s) from the ones which are auto-generated from
    // adlc register classes to make AArch64 rheapbase (r27) and rfp (r29)
    // registers conditionally reserved.

    // exclude r31 (sp) from the 32-bit register set
    _ANY_REG32_mask = _ALL_REG32_mask;
    _ANY_REG32_mask.Remove(OptoReg::as_OptoReg(r31_sp->as_VMReg()));

    _ANY_REG_mask = _ALL_REG_mask;

    _PTR_REG_mask = _ALL_REG_mask;

    // start from the full sets and subtract the non-allocatable registers
    _NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
    _NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);

    _NO_SPECIAL_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    _NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    // r27 is not allocatable when compressed oops is on and heapbase is not
    // zero, compressed klass pointers doesn't use r27 after JDK-8234794
    if (UseCompressedOops && CompressedOops::ptrs_base() != NULL) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_REG_mask.SUBTRACT(_HEAPBASE_REG_mask);
      _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_HEAPBASE_REG_mask);
    }

    // r29 is not allocatable when PreserveFramePointer is on
    if (PreserveFramePointer) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_REG_mask.SUBTRACT(_FP_REG_mask);
      _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_FP_REG_mask);
    }
  }
 1115 
  // Optimization of volatile gets and puts
 1117   // -------------------------------------
 1118   //
 1119   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
 1120   // use to implement volatile reads and writes. For a volatile read
 1121   // we simply need
 1122   //
 1123   //   ldar<x>
 1124   //
 1125   // and for a volatile write we need
 1126   //
 1127   //   stlr<x>
 1128   //
 1129   // Alternatively, we can implement them by pairing a normal
 1130   // load/store with a memory barrier. For a volatile read we need
 1131   //
 1132   //   ldr<x>
 1133   //   dmb ishld
 1134   //
 1135   // for a volatile write
 1136   //
 1137   //   dmb ish
 1138   //   str<x>
 1139   //   dmb ish
 1140   //
 1141   // We can also use ldaxr and stlxr to implement compare and swap CAS
 1142   // sequences. These are normally translated to an instruction
 1143   // sequence like the following
 1144   //
 1145   //   dmb      ish
 1146   // retry:
 1147   //   ldxr<x>   rval raddr
 1148   //   cmp       rval rold
 1149   //   b.ne done
 1150   //   stlxr<x>  rval, rnew, rold
 1151   //   cbnz      rval retry
 1152   // done:
 1153   //   cset      r0, eq
 1154   //   dmb ishld
 1155   //
 1156   // Note that the exclusive store is already using an stlxr
 1157   // instruction. That is required to ensure visibility to other
 1158   // threads of the exclusive write (assuming it succeeds) before that
 1159   // of any subsequent writes.
 1160   //
 1161   // The following instruction sequence is an improvement on the above
 1162   //
 1163   // retry:
 1164   //   ldaxr<x>  rval raddr
 1165   //   cmp       rval rold
 1166   //   b.ne done
 1167   //   stlxr<x>  rval, rnew, rold
 1168   //   cbnz      rval retry
 1169   // done:
 1170   //   cset      r0, eq
 1171   //
 1172   // We don't need the leading dmb ish since the stlxr guarantees
 1173   // visibility of prior writes in the case that the swap is
 1174   // successful. Crucially we don't have to worry about the case where
 1175   // the swap is not successful since no valid program should be
 1176   // relying on visibility of prior changes by the attempting thread
 1177   // in the case where the CAS fails.
 1178   //
 1179   // Similarly, we don't need the trailing dmb ishld if we substitute
 1180   // an ldaxr instruction since that will provide all the guarantees we
 1181   // require regarding observation of changes made by other threads
 1182   // before any change to the CAS address observed by the load.
 1183   //
 1184   // In order to generate the desired instruction sequence we need to
 1185   // be able to identify specific 'signature' ideal graph node
 1186   // sequences which i) occur as a translation of a volatile reads or
 1187   // writes or CAS operations and ii) do not occur through any other
 1188   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
 1190   // sequences to the desired machine code sequences. Selection of the
 1191   // alternative rules can be implemented by predicates which identify
 1192   // the relevant node sequences.
 1193   //
 1194   // The ideal graph generator translates a volatile read to the node
 1195   // sequence
 1196   //
 1197   //   LoadX[mo_acquire]
 1198   //   MemBarAcquire
 1199   //
 1200   // As a special case when using the compressed oops optimization we
 1201   // may also see this variant
 1202   //
 1203   //   LoadN[mo_acquire]
 1204   //   DecodeN
 1205   //   MemBarAcquire
 1206   //
 1207   // A volatile write is translated to the node sequence
 1208   //
 1209   //   MemBarRelease
 1210   //   StoreX[mo_release] {CardMark}-optional
 1211   //   MemBarVolatile
 1212   //
 1213   // n.b. the above node patterns are generated with a strict
 1214   // 'signature' configuration of input and output dependencies (see
 1215   // the predicates below for exact details). The card mark may be as
 1216   // simple as a few extra nodes or, in a few GC configurations, may
 1217   // include more complex control flow between the leading and
 1218   // trailing memory barriers. However, whatever the card mark
 1219   // configuration these signatures are unique to translated volatile
 1220   // reads/stores -- they will not appear as a result of any other
 1221   // bytecode translation or inlining nor as a consequence of
 1222   // optimizing transforms.
 1223   //
 1224   // We also want to catch inlined unsafe volatile gets and puts and
 1225   // be able to implement them using either ldar<x>/stlr<x> or some
 1226   // combination of ldr<x>/stlr<x> and dmb instructions.
 1227   //
 1228   // Inlined unsafe volatiles puts manifest as a minor variant of the
 1229   // normal volatile put node sequence containing an extra cpuorder
 1230   // membar
 1231   //
 1232   //   MemBarRelease
 1233   //   MemBarCPUOrder
 1234   //   StoreX[mo_release] {CardMark}-optional
 1235   //   MemBarCPUOrder
 1236   //   MemBarVolatile
 1237   //
 1238   // n.b. as an aside, a cpuorder membar is not itself subject to
 1239   // matching and translation by adlc rules.  However, the rule
 1240   // predicates need to detect its presence in order to correctly
 1241   // select the desired adlc rules.
 1242   //
 1243   // Inlined unsafe volatile gets manifest as a slightly different
 1244   // node sequence to a normal volatile get because of the
 1245   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
  // MemBarAcquire, possibly through an optional DecodeN, is still
  // present
 1249   //
 1250   //   MemBarCPUOrder
 1251   //        ||       \\
 1252   //   MemBarCPUOrder LoadX[mo_acquire]
 1253   //        ||            |
 1254   //        ||       {DecodeN} optional
 1255   //        ||       /
 1256   //     MemBarAcquire
 1257   //
 1258   // In this case the acquire membar does not directly depend on the
 1259   // load. However, we can be sure that the load is generated from an
 1260   // inlined unsafe volatile get if we see it dependent on this unique
 1261   // sequence of membar nodes. Similarly, given an acquire membar we
 1262   // can know that it was added because of an inlined unsafe volatile
 1263   // get if it is fed and feeds a cpuorder membar and if its feed
 1264   // membar also feeds an acquiring load.
 1265   //
 1266   // Finally an inlined (Unsafe) CAS operation is translated to the
 1267   // following ideal graph
 1268   //
 1269   //   MemBarRelease
 1270   //   MemBarCPUOrder
 1271   //   CompareAndSwapX {CardMark}-optional
 1272   //   MemBarCPUOrder
 1273   //   MemBarAcquire
 1274   //
 1275   // So, where we can identify these volatile read and write
 1276   // signatures we can choose to plant either of the above two code
 1277   // sequences. For a volatile read we can simply plant a normal
 1278   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
 1279   // also choose to inhibit translation of the MemBarAcquire and
 1280   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
 1281   //
 1282   // When we recognise a volatile store signature we can choose to
  // plant a dmb ish as a translation for the MemBarRelease, a
 1284   // normal str<x> and then a dmb ish for the MemBarVolatile.
 1285   // Alternatively, we can inhibit translation of the MemBarRelease
 1286   // and MemBarVolatile and instead plant a simple stlr<x>
 1287   // instruction.
 1288   //
 1289   // when we recognise a CAS signature we can choose to plant a dmb
 1290   // ish as a translation for the MemBarRelease, the conventional
 1291   // macro-instruction sequence for the CompareAndSwap node (which
 1292   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
 1293   // Alternatively, we can elide generation of the dmb instructions
 1294   // and plant the alternative CompareAndSwap macro-instruction
 1295   // sequence (which uses ldaxr<x>).
 1296   //
 1297   // Of course, the above only applies when we see these signature
 1298   // configurations. We still want to plant dmb instructions in any
 1299   // other cases where we may see a MemBarAcquire, MemBarRelease or
 1300   // MemBarVolatile. For example, at the end of a constructor which
 1301   // writes final/volatile fields we will see a MemBarRelease
 1302   // instruction and this needs a 'dmb ish' lest we risk the
 1303   // constructed object being visible without making the
 1304   // final/volatile field writes visible.
 1305   //
 1306   // n.b. the translation rules below which rely on detection of the
 1307   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
 1308   // If we see anything other than the signature configurations we
 1309   // always just translate the loads and stores to ldr<x> and str<x>
 1310   // and translate acquire, release and volatile membars to the
 1311   // relevant dmb instructions.
 1312   //
 1313 
  // is_CAS(int opcode, bool maybe_volatile)
  //
  // return true if opcode is one of the possible CompareAndSwapX
  // values otherwise false. when maybe_volatile is true the
  // CompareAndExchangeX and WeakCompareAndSwapX variants are also
  // accepted: for those opcodes the result is maybe_volatile itself,
  // letting callers choose whether they count as CAS.

  bool is_CAS(int opcode, bool maybe_volatile)
  {
    switch(opcode) {
      // We handle these
    case Op_CompareAndSwapI:
    case Op_CompareAndSwapL:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_CompareAndSwapB:
    case Op_CompareAndSwapS:
    case Op_GetAndSetI:
    case Op_GetAndSetL:
    case Op_GetAndSetP:
    case Op_GetAndSetN:
    case Op_GetAndAddI:
    case Op_GetAndAddL:
      return true;
      // these are accepted only when maybe_volatile is true
    case Op_CompareAndExchangeI:
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeB:
    case Op_CompareAndExchangeS:
    case Op_CompareAndExchangeL:
    case Op_CompareAndExchangeP:
    case Op_WeakCompareAndSwapB:
    case Op_WeakCompareAndSwapS:
    case Op_WeakCompareAndSwapI:
    case Op_WeakCompareAndSwapL:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      return maybe_volatile;
    default:
      return false;
    }
  }
 1359 
 1360   // helper to determine the maximum number of Phi nodes we may need to
 1361   // traverse when searching from a card mark membar for the merge mem
 1362   // feeding a trailing membar or vice versa
 1363 
 1364 // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
 1365 
 1366 bool unnecessary_acquire(const Node *barrier)
 1367 {
 1368   assert(barrier->is_MemBar(), "expecting a membar");
 1369 
 1370   if (UseBarriersForVolatile) {
 1371     // we need to plant a dmb
 1372     return false;
 1373   }
 1374 
 1375   MemBarNode* mb = barrier->as_MemBar();
 1376 
 1377   if (mb->trailing_load()) {
 1378     return true;
 1379   }
 1380 
 1381   if (mb->trailing_load_store()) {
 1382     Node* load_store = mb->in(MemBarNode::Precedent);
 1383     assert(load_store->is_LoadStore(), "unexpected graph shape");
 1384     return is_CAS(load_store->Opcode(), true);
 1385   }
 1386 
 1387   return false;
 1388 }
 1389 
 1390 bool needs_acquiring_load(const Node *n)
 1391 {
 1392   assert(n->is_Load(), "expecting a load");
 1393   if (UseBarriersForVolatile) {
 1394     // we use a normal load and a dmb
 1395     return false;
 1396   }
 1397 
 1398   LoadNode *ld = n->as_Load();
 1399 
 1400   return ld->is_acquire();
 1401 }
 1402 
 1403 bool unnecessary_release(const Node *n)
 1404 {
 1405   assert((n->is_MemBar() &&
 1406 	  n->Opcode() == Op_MemBarRelease),
 1407 	 "expecting a release membar");
 1408 
 1409   if (UseBarriersForVolatile) {
 1410     // we need to plant a dmb
 1411     return false;
 1412   }
 1413 
 1414   MemBarNode *barrier = n->as_MemBar();
 1415   if (!barrier->leading()) {
 1416     return false;
 1417   } else {
 1418     Node* trailing = barrier->trailing_membar();
 1419     MemBarNode* trailing_mb = trailing->as_MemBar();
 1420     assert(trailing_mb->trailing(), "Not a trailing membar?");
 1421     assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
 1422 
 1423     Node* mem = trailing_mb->in(MemBarNode::Precedent);
 1424     if (mem->is_Store()) {
 1425       assert(mem->as_Store()->is_release(), "");
 1426       assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
 1427       return true;
 1428     } else {
 1429       assert(mem->is_LoadStore(), "");
 1430       assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
 1431       return is_CAS(mem->Opcode(), true);
 1432     }
 1433   }
 1434   return false;
 1435 }
 1436 
// true iff this volatile membar trails a releasing store and can
// therefore be elided (the store itself is matched to a stlr<x>)
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode *mbvol = n->as_MemBar();

  bool release = mbvol->trailing_store();
  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
  // sanity check the leading/trailing membar pairing
  if (release) {
    Node* leading = mbvol->leading_membar();
    assert(leading->Opcode() == Op_MemBarRelease, "");
    assert(leading->as_MemBar()->leading_store(), "");
    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
  }
#endif

  return release;
}
 1460 
 1461 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
 1462 
 1463 bool needs_releasing_store(const Node *n)
 1464 {
 1465   // assert n->is_Store();
 1466   if (UseBarriersForVolatile) {
 1467     // we use a normal store and dmb combination
 1468     return false;
 1469   }
 1470 
 1471   StoreNode *st = n->as_Store();
 1472 
 1473   return st->trailing_membar() != NULL;
 1474 }
 1475 
 1476 // predicate controlling translation of CAS
 1477 //
 1478 // returns true if CAS needs to use an acquiring load otherwise false
 1479 
 1480 bool needs_acquiring_load_exclusive(const Node *n)
 1481 {
 1482   assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
 1483   if (UseBarriersForVolatile) {
 1484     return false;
 1485   }
 1486 
 1487   LoadStoreNode* ldst = n->as_LoadStore();
 1488   if (is_CAS(n->Opcode(), false)) {
 1489     assert(ldst->trailing_membar() != NULL, "expected trailing membar");
 1490   } else {
 1491     return ldst->trailing_membar() != NULL;
 1492   }
 1493 
 1494   // so we can just return true here
 1495   return true;
 1496 }
 1497 
 1498 #define __ _masm.
 1499 
 1500 // advance declarations for helper functions to convert register
 1501 // indices to register objects
 1502 
 1503 // the ad file has to provide implementations of certain methods
 1504 // expected by the generic code
 1505 //
 1506 // REQUIRED FUNCTIONALITY
 1507 
 1508 //=============================================================================
 1509 
 1510 // !!!!! Special hack to get all types of calls to specify the byte offset
 1511 //       from the start of the call to the point where the return address
 1512 //       will point.
 1513 
 1514 int MachCallStaticJavaNode::ret_addr_offset()
 1515 {
 1516   // call should be a simple bl
 1517   int off = 4;
 1518   return off;
 1519 }
 1520 
// offset of the return address from the start of a dynamic call:
// four instructions in all
int MachCallDynamicJavaNode::ret_addr_offset()
{
  return 16; // movz, movk, movk, bl
}
 1525 
 1526 int MachCallRuntimeNode::ret_addr_offset() {
 1527   // for generated stubs the call will be
 1528   //   far_call(addr)
 1529   // for real runtime callouts it will be six instructions
 1530   // see aarch64_enc_java_to_runtime
 1531   //   adr(rscratch2, retaddr)
 1532   //   lea(rscratch1, RuntimeAddress(addr)
 1533   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
 1534   //   blr(rscratch1)
 1535   CodeBlob *cb = CodeCache::find_blob(_entry_point);
 1536   if (cb) {
 1537     return MacroAssembler::far_branch_size();
 1538   } else {
 1539     return 6 * NativeInstruction::instruction_size;
 1540   }
 1541 }
 1542 
 1543 // Indicate if the safepoint node needs the polling page as an input
 1544 
 1545 // the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
 1547 // instruction itself. so we cannot plant a mov of the safepoint poll
 1548 // address followed by a load. setting this to true means the mov is
 1549 // scheduled as a prior instruction. that's better for scheduling
 1550 // anyway.
 1551 
// see the explanatory comment above: the poll address must be a
// separately scheduled input so the oop data lands on the load itself
bool SafePointNode::needs_polling_address_input()
{
  return true;
}
 1556 
 1557 //=============================================================================
 1558 
#ifndef PRODUCT
// debug-only pretty printing for the breakpoint node
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif

// a breakpoint is a single brk instruction
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  C2_MacroAssembler _masm(&cbuf);
  __ brk(0);
}

// defer to the generic size computation
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 1573 
 1574 //=============================================================================
 1575 
 1576 #ifndef PRODUCT
 1577   void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
 1578     st->print("nop \t# %d bytes pad for loops and calls", _count);
 1579   }
 1580 #endif
 1581 
 1582   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
 1583     C2_MacroAssembler _masm(&cbuf);
 1584     for (int i = 0; i < _count; i++) {
 1585       __ nop();
 1586     }
 1587   }
 1588 
 1589   uint MachNopNode::size(PhaseRegAlloc*) const {
 1590     return _count * NativeInstruction::instruction_size;
 1591   }
 1592 
 1593 //=============================================================================
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

int ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}

// this node never requires post-allocation expansion on this port
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}

// the constant table base emits no code of its own
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}

uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
 1618 
#ifndef PRODUCT
// debug-only pretty printing of the method prolog: stack bang,
// frame build and the optional nmethod entry barrier sequence
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  if (C->output()->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // small frames allocate with an immediate sub; larger frames need
  // the size in a scratch register
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
  // entry barrier description for compiled Java methods when the GC
  // provides an nmethod barrier
  if (C->stub_function() == NULL && BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) {
    st->print("\n\t");
    st->print("ldr  rscratch1, [guard]\n\t");
    st->print("dmb ishld\n\t");
    st->print("ldr  rscratch2, [rthread, #thread_disarmed_offset]\n\t");
    st->print("cmp  rscratch1, rscratch2\n\t");
    st->print("b.eq skip");
    st->print("\n\t");
    st->print("blr #nmethod_entry_barrier_stub\n\t");
    st->print("b skip\n\t");
    st->print("guard: int\n\t");
    st->print("\n\t");
    st->print("skip:\n\t");
  }
}
#endif
 1654 
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->output()->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  if (C->clinit_barrier_on_entry()) {
    assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");

    Label L_skip_barrier;

    // class-init barrier: falls through to a far jump to the
    // wrong-method stub unless clinit_barrier branches to L_skip_barrier
    __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
    __ bind(L_skip_barrier);
  }

  // bang the stack before building the frame when required
  int bangsize = C->output()->bang_size_in_bytes();
  if (C->output()->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  // plant the nmethod entry barrier for compiled Java methods
  if (C->stub_function() == NULL) {
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->nmethod_entry_barrier(&_masm);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->output()->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    ConstantTable& constant_table = C->output()->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
 1702 
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

// number of relocatable values contained in the prolog
int MachPrologNode::reloc() const
{
  return 0;
}
 1713 
 1714 //=============================================================================
 1715 
#ifndef PRODUCT
// debug-only pretty printing of the method epilog: frame pop and the
// optional return polling-page touch
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  // small frames restore fp/lr with immediate-offset forms; larger
  // frames need the offset in a scratch register
  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("ldr rscratch1, [rthread],#polling_page_offset\n\t");
    st->print("ldr zr, [rscratch1]");
  }
}
#endif
 1741 
// Emit the method epilog: tear down the frame, optionally check the
// reserved stack zone, and read the polling page for a return safepoint.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  // ReservedStackAccess support: only emitted when this compilation
  // actually touches the reserved zone.
  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  // Poll only for full method compilations (stubs don't poll on return).
  if (do_polling() && C->is_method_compilation()) {
    __ fetch_and_read_polling_page(rscratch1, relocInfo::poll_return_type);
  }
}
 1757 
// Epilog size varies (poll, reserved-stack check), so compute it by
// scratch emission.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

// Use the generic pipeline description for the epilog.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
 1771 
 1772 //=============================================================================
 1773 
 1774 // Figure out which register class each belongs in: rc_int, rc_float or
 1775 // rc_stack.
 1776 enum RC { rc_bad, rc_int, rc_float, rc_stack };
 1777 
 1778 static enum RC rc_class(OptoReg::Name reg) {
 1779 
 1780   if (reg == OptoReg::Bad) {
 1781     return rc_bad;
 1782   }
 1783 
 1784   // we have 30 int registers * 2 halves
 1785   // (rscratch1 and rscratch2 are omitted)
 1786   int slots_of_int_registers = RegisterImpl::max_slots_per_register * (RegisterImpl::number_of_registers - 2);
 1787 
 1788   if (reg < slots_of_int_registers) {
 1789     return rc_int;
 1790   }
 1791 
 1792   // we have 32 float register * 4 halves
 1793   if (reg < slots_of_int_registers + FloatRegisterImpl::max_slots_per_register * FloatRegisterImpl::number_of_registers) {
 1794     return rc_float;
 1795   }
 1796 
 1797   // Between float regs & stack is the flags regs.
 1798   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
 1799 
 1800   return rc_stack;
 1801 }
 1802 
 1803 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
 1804   Compile* C = ra_->C;
 1805 
 1806   // Get registers to move.
 1807   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
 1808   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
 1809   OptoReg::Name dst_hi = ra_->get_reg_second(this);
 1810   OptoReg::Name dst_lo = ra_->get_reg_first(this);
 1811 
 1812   enum RC src_hi_rc = rc_class(src_hi);
 1813   enum RC src_lo_rc = rc_class(src_lo);
 1814   enum RC dst_hi_rc = rc_class(dst_hi);
 1815   enum RC dst_lo_rc = rc_class(dst_lo);
 1816 
 1817   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
 1818 
 1819   if (src_hi != OptoReg::Bad) {
 1820     assert((src_lo&1)==0 && src_lo+1==src_hi &&
 1821            (dst_lo&1)==0 && dst_lo+1==dst_hi,
 1822            "expected aligned-adjacent pairs");
 1823   }
 1824 
 1825   if (src_lo == dst_lo && src_hi == dst_hi) {
 1826     return 0;            // Self copy, no move.
 1827   }
 1828 
 1829   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
 1830               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
 1831   int src_offset = ra_->reg2offset(src_lo);
 1832   int dst_offset = ra_->reg2offset(dst_lo);
 1833 
 1834   if (bottom_type()->isa_vect() != NULL) {
 1835     uint ireg = ideal_reg();
 1836     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
 1837     if (cbuf) {
 1838       C2_MacroAssembler _masm(cbuf);
 1839       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
 1840       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
 1841         // stack->stack
 1842         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
 1843         if (ireg == Op_VecD) {
 1844           __ unspill(rscratch1, true, src_offset);
 1845           __ spill(rscratch1, true, dst_offset);
 1846         } else {
 1847           __ spill_copy128(src_offset, dst_offset);
 1848         }
 1849       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
 1850         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 1851                ireg == Op_VecD ? __ T8B : __ T16B,
 1852                as_FloatRegister(Matcher::_regEncode[src_lo]));
 1853       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
 1854         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
 1855                        ireg == Op_VecD ? __ D : __ Q,
 1856                        ra_->reg2offset(dst_lo));
 1857       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
 1858         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 1859                        ireg == Op_VecD ? __ D : __ Q,
 1860                        ra_->reg2offset(src_lo));
 1861       } else {
 1862         ShouldNotReachHere();
 1863       }
 1864     }
 1865   } else if (cbuf) {
 1866     C2_MacroAssembler _masm(cbuf);
 1867     switch (src_lo_rc) {
 1868     case rc_int:
 1869       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
 1870         if (is64) {
 1871             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
 1872                    as_Register(Matcher::_regEncode[src_lo]));
 1873         } else {
 1874             C2_MacroAssembler _masm(cbuf);
 1875             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
 1876                     as_Register(Matcher::_regEncode[src_lo]));
 1877         }
 1878       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
 1879         if (is64) {
 1880             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 1881                      as_Register(Matcher::_regEncode[src_lo]));
 1882         } else {
 1883             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 1884                      as_Register(Matcher::_regEncode[src_lo]));
 1885         }
 1886       } else {                    // gpr --> stack spill
 1887         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 1888         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
 1889       }
 1890       break;
 1891     case rc_float:
 1892       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
 1893         if (is64) {
 1894             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
 1895                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 1896         } else {
 1897             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
 1898                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 1899         }
 1900       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
 1901           if (cbuf) {
 1902             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 1903                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 1904         } else {
 1905             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 1906                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 1907         }
 1908       } else {                    // fpr --> stack spill
 1909         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 1910         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
 1911                  is64 ? __ D : __ S, dst_offset);
 1912       }
 1913       break;
 1914     case rc_stack:
 1915       if (dst_lo_rc == rc_int) {  // stack --> gpr load
 1916         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
 1917       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
 1918         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 1919                    is64 ? __ D : __ S, src_offset);
 1920       } else {                    // stack --> stack copy
 1921         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 1922         __ unspill(rscratch1, is64, src_offset);
 1923         __ spill(rscratch1, is64, dst_offset);
 1924       }
 1925       break;
 1926     default:
 1927       assert(false, "bad rc_class for spill");
 1928       ShouldNotReachHere();
 1929     }
 1930   }
 1931 
 1932   if (st) {
 1933     st->print("spill ");
 1934     if (src_lo_rc == rc_stack) {
 1935       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
 1936     } else {
 1937       st->print("%s -> ", Matcher::regName[src_lo]);
 1938     }
 1939     if (dst_lo_rc == rc_stack) {
 1940       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
 1941     } else {
 1942       st->print("%s", Matcher::regName[dst_lo]);
 1943     }
 1944     if (bottom_type()->isa_vect() != NULL) {
 1945       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
 1946     } else {
 1947       st->print("\t# spill size = %d", is64 ? 64:32);
 1948     }
 1949   }
 1950 
 1951   return 0;
 1952 
 1953 }
 1954 
 1955 #ifndef PRODUCT
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  // Without register assignments we can only print node identities;
  // with them, implementation() prints the actual move.
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(NULL, ra_, false, st);
}
 1962 #endif
 1963 
// Emit the spill copy; all the work happens in implementation().
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}

// Size varies with the kind of move, so compute it by scratch emission.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 1971 
 1972 //=============================================================================
 1973 
 1974 #ifndef PRODUCT
 1975 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1976   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 1977   int reg = ra_->get_reg_first(this);
 1978   st->print("add %s, rsp, #%d]\t# box lock",
 1979             Matcher::regName[reg], offset);
 1980 }
 1981 #endif
 1982 
// Materialize the address of the on-stack box-lock slot into the
// assigned register: reg = sp + offset.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  C2_MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  // The offset is expected to fit the add immediate encoding; anything
  // else indicates a bug upstream.
  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    ShouldNotReachHere();
  }
}

// Fixed size: the single add instruction emitted above (4 bytes).
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  return 4;
}
 2000 
 2001 //=============================================================================
 2002 
 2003 #ifndef PRODUCT
 2004 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 2005 {
 2006   st->print_cr("# MachUEPNode");
 2007   if (UseCompressedClassPointers) {
 2008     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2009     if (CompressedKlassPointers::shift() != 0) {
 2010       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
 2011     }
 2012   } else {
 2013    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2014   }
 2015   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
 2016   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
 2017 }
 2018 #endif
 2019 
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  C2_MacroAssembler _masm(&cbuf);

  // Inline cache check: compare the receiver's klass (loaded from
  // j_rarg0) against the expected klass in rscratch2; on mismatch jump
  // to the shared IC miss stub.
  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}

// Variable size (far_jump); compute by scratch emission.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
 2038 
 2039 // REQUIRED EMIT CODE
 2040 
 2041 //=============================================================================
 2042 
// Emit exception handler code.
// The handler is a small out-of-line stub: a far jump to the shared
// exception blob. Returns the stub's start offset, or 0 (after
// recording a failure) when the code cache is full.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  C2_MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
 2062 
// Emit deopt handler code.
// Returns the handler's start offset, or 0 (after recording a failure)
// when the code cache is full.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  C2_MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // Point lr at this spot before jumping — NOTE(review): presumably the
  // deopt blob uses lr to identify the deopt site; confirm against
  // SharedRuntime::deopt_blob()->unpack().
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
 2083 
 2084 // REQUIRED MATCHER CODE
 2085 
 2086 //=============================================================================
 2087 
 2088 const bool Matcher::match_rule_supported(int opcode) {
 2089   if (!has_match_rule(opcode))
 2090     return false;
 2091 
 2092   bool ret_value = true;
 2093   switch (opcode) {
 2094     case Op_CacheWB:
 2095     case Op_CacheWBPreSync:
 2096     case Op_CacheWBPostSync:
 2097       if (!VM_Version::supports_data_cache_line_flush()) {
 2098         ret_value = false;
 2099       }
 2100       break;
 2101   }
 2102 
 2103   return ret_value; // Per default match rules are supported.
 2104 }
 2105 
// Identify extra cases that we might want to provide match rules for vector nodes and
// other intrinsics guarded with vector length (vlen) and element type (bt).
const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
  // Reject anything wider than a 128-bit NEON register.
  int bit_size = vlen * type2aelembytes(bt) * 8;
  if (bit_size > 128) {
    return false;
  }

  if (!match_rule_supported(opcode) || !vector_size_supported(bt, vlen)) {
    return false;
  }

  // Special cases which require vector length
  switch (opcode) {
    case Op_MulAddVS2VI: {
      if (vlen != 4) {
        return false;
      }
      break;
    }
    case Op_VectorLoadShuffle:
    case Op_VectorRearrange:
      if (vlen < 4) {
        return false;
      }
      break;
  }

  return true; // Per default match rules are supported.
}
 2136 
// No predicated (masked) vector support in this configuration.
const bool Matcher::has_predicated_vectors(void) {
  return false;
}

// Per-element variable shift counts are supported.
bool Matcher::supports_vector_variable_shifts(void) {
  return true;
}

// Vector calling convention not yet implemented.
const bool Matcher::supports_vector_calling_convention(void) {
  return false;
}

void Matcher::vector_calling_convention(VMRegPair *regs, uint num_bits, uint total_args_passed) {
  (void) SharedRuntime::vector_calling_convention(regs, num_bits, total_args_passed);
}

OptoRegPair Matcher::vector_return_value(uint ideal_reg) {
  Unimplemented();
  return OptoRegPair(0, 0);
}

// Use the default float register pressure threshold unchanged.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}

// Not used on this platform.
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
 2168 
 2169 // Is this branch offset short enough that a short branch can be used?
 2170 //
 2171 // NOTE: If the platform does not provide any short branch variants, then
 2172 //       this method should return false for offset 0.
 2173 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
 2174   // The passed offset is relative to address of the branch.
 2175 
 2176   return (-32768 <= offset && offset < 32768);
 2177 }
 2178 
// Can a 64-bit constant be materialized cheaply enough to prefer a
// single long store over two int stores?
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}

// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
 2189 
// Vector width in bytes.
const int Matcher::vector_width_in_bytes(BasicType bt) {
  // SVE allows up to 256 bytes, NEON caps at 16; MaxVectorSize can
  // lower either. A width of 0 means "no vectorization for this type".
  int size = MIN2((UseSVE > 0) ? 256 : 16, (int)MaxVectorSize);
  // Minimum 2 values in vector
  if (size < 2*type2aelembytes(bt)) size = 0;
  // But never < 4
  if (size < 4) size = 0;
  return size;
}

// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
 2204 const int Matcher::min_vector_size(const BasicType bt) {
 2205   int max_size = max_vector_size(bt);
 2206   // Limit the vector size to 8 bytes
 2207   int size = 8 / type2aelembytes(bt);
 2208   if (bt == T_BYTE) {
 2209     // To support vector api shuffle/rearrange.
 2210     size = 4;
 2211   } else if (bt == T_BOOLEAN) {
 2212     // To support vector api load/store mask.
 2213     size = 2;
 2214   }
 2215   if (size < 2) size = 2;
 2216   return MIN2(size,max_size);
 2217 }
 2218 
// Vector ideal reg: map a vector length in bytes to the ideal register
// kind that holds it.
const uint Matcher::vector_ideal_reg(int len) {
  switch(len) {
    // For 16-bit/32-bit mask vector, reuse VecD.
    case  2:
    case  4:
    case  8: return Op_VecD;
    case 16: return Op_VecX;
  }
  ShouldNotReachHere();
  return 0;
}

// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  return false;
}

// aarch64 supports misaligned vectors store/load.
const bool Matcher::misaligned_vectors_ok() {
  return true;
}
 2241 
 2242 // false => size gets scaled to BytesPerLong, ok.
 2243 const bool Matcher::init_array_count_is_in_bytes = false;
 2244 
// Use conditional move (CMOVL).
// Extra cost of a long cmove relative to an int cmove: none here.
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}

// Extra cost of a float cmove relative to an int cmove: none here.
const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}
 2255 
 2256 // Does the CPU require late expand (see block.cpp for description of late expand)?
 2257 const bool Matcher::require_postalloc_expand = false;
 2258 
 2259 // Do we need to mask the count passed to shift instructions or does
 2260 // the cpu only look at the lower 5/6 bits anyway?
 2261 const bool Matcher::need_masked_shift_count = false;
 2262 
 2263 // No support for generic vector operands.
 2264 const bool Matcher::supports_generic_vector_operands  = false;
 2265 
// Generic vector operands are disabled on this platform
// (supports_generic_vector_operands == false), so all three hooks below
// must never be called.
MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg, bool is_temp) {
  ShouldNotReachHere(); // generic vector operands not supported
  return NULL;
}

bool Matcher::is_generic_reg2reg_move(MachNode* m) {
  ShouldNotReachHere();  // generic vector operands not supported
  return false;
}

bool Matcher::is_generic_vector(MachOper* opnd)  {
  ShouldNotReachHere();  // generic vector operands not supported
  return false;
}
 2280 
 2281 // This affects two different things:
 2282 //  - how Decode nodes are matched
 2283 //  - how ImplicitNullCheck opportunities are recognized
 2284 // If true, the matcher will try to remove all Decodes and match them
 2285 // (as operands) into nodes. NullChecks are not prepared to deal with
 2286 // Decodes by final_graph_reshaping().
 2287 // If false, final_graph_reshaping() forces the decode behind the Cmp
 2288 // for a NullCheck. The matcher matches the Decode node into a register.
 2289 // Implicit_null_check optimization moves the Decode along with the
 2290 // memory operation back up before the NullCheck.
// A narrow oop can be used directly in an address expression only when
// decoding requires no shift.
bool Matcher::narrow_oop_use_complex_address() {
  return CompressedOops::shift() == 0;
}

bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}

bool Matcher::const_oop_prefer_decode() {
  // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
  return CompressedOops::base() == NULL;
}

bool Matcher::const_klass_prefer_decode() {
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
  return CompressedKlassPointers::base() == NULL;
}
 2310 
 2311 // Is it better to copy float constants, or load them directly from
 2312 // memory?  Intel can load a float constant from a direct address,
 2313 // requiring no extra registers.  Most RISCs will have to materialize
 2314 // an address into a register first, so they would do better to copy
 2315 // the constant from stack.
 2316 const bool Matcher::rematerialize_float_constants = false;
 2317 
 2318 // If CPU can load and store mis-aligned doubles directly then no
 2319 // fixup is needed.  Else we split the double into 2 integer pieces
 2320 // and move it piece-by-piece.  Only happens when passing doubles into
 2321 // C code as the Java calling convention forces doubles to be aligned.
 2322 const bool Matcher::misaligned_doubles_ok = true;
 2323 
// Not needed on this platform (the "No-op on amd64" comment was
// inherited boilerplate): implicit null checks require no operand
// fixup here, so this must never be called.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
 2328 
 2329 // Advertise here if the CPU requires explicit rounding operations to implement strictfp mode.
 2330 const bool Matcher::strict_fp_requires_explicit_rounding = false;
 2331 
// Are floats converted to double when stored to stack during
// deoptimization? Not on this platform.
bool Matcher::float_in_double() { return false; }
 2335 
 2336 // Do ints take an entire long register or just half?
 2337 // The relevant question is how the int is callee-saved:
 2338 // the whole long is written but de-opt'ing will have to extract
 2339 // the relevant 32 bits.
 2340 const bool Matcher::int_in_long = true;
 2341 
 2342 // Return whether or not this register is ever used as an argument.
 2343 // This function is used on startup to build the trampoline stubs in
 2344 // generateOptoStub.  Registers not mentioned will be killed by the VM
 2345 // call in the trampoline, and arguments in those registers not be
 2346 // available to the callee.
bool Matcher::can_be_java_arg(int reg)
{
  // r0-r7 and v0-v7 (both halves of each) are the Java argument
  // registers on AArch64.
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}

// Any Java argument register may be spilled.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
 2372 
// No hand-written assembler path for long division by a constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
 2376 
// Register for DIVI projection of divmodI.
// (No divmod fusion on this platform: none of the four masks below may
// ever be requested.)
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// SP is preserved in the frame pointer register across method handle
// invokes.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
 2403 
 2404 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
 2405   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
 2406     Node* u = addp->fast_out(i);
 2407     if (u->is_Mem()) {
 2408       int opsize = u->as_Mem()->memory_size();
 2409       assert(opsize > 0, "unexpected memory operand size");
 2410       if (u->as_Mem()->memory_size() != (1<<shift)) {
 2411         return false;
 2412       }
 2413     }
 2414   }
 2415   return true;
 2416 }
 2417 
 2418 const bool Matcher::convi2l_type_required = false;
 2419 
// Should the matcher clone input 'm' of node 'n'?
bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
  // Clone the shift-count constant into each vector-shift user —
  // presumably so the matcher can fold it into the instruction; see
  // is_vshift_con_pattern.
  if (is_vshift_con_pattern(n, m)) { // ShiftV src (ShiftCntV con)
    mstack.push(m, Visit);           // m = ShiftCntV
    return true;
  }
  return false;
}
 2428 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  // Handle (base + (index << con)) — optionally with a ConvI2L feeding
  // the shift — and bare (base + ConvI2L(index)) offsets, so the shift
  // or extension can be subsumed into the memory operand of each use.
  Node *off = m->in(AddPNode::Offset);
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
 2469 
// No platform-specific address reshaping on this platform.
void Compile::reshape_address(AddPNode* addp) {
}
 2472 
 2473 
// Emit a volatile load/store via INSN using only a plain base register:
// index/scale/displacement addressing is not permitted for volatile
// accesses, which the guarantees below enforce.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  C2_MacroAssembler _masm(&cbuf);                                       \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
 2482 
 2483 
// Translate a matched memory operand (opcode + base/index/size/disp)
// into an Address, selecting sxtw extension for the I2L index forms and
// lsl otherwise.
static Address mem2address(int opcode, Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
    case INDINDEXI2L:
    case INDINDEXI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      return Address(base, disp);
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      return Address(base, as_Register(index), scale);
    }
  }
 2509 
 2510 
 2511 typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
 2512 typedef void (MacroAssembler::* mem_insn2)(Register Rt, Register adr);
 2513 typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
 2514 typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
 2515                                   MacroAssembler::SIMD_RegVariant T, const Address &adr);
 2516 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge. Integer-register variant; see the
  // FloatRegister and vector overloads below.
  static void loadStore(C2_MacroAssembler masm, mem_insn insn,
                        Register reg, int opcode,
                        Register base, int index, int scale, int disp,
                        int size_in_memory)
  {
    Address addr = mem2address(opcode, base, index, scale, disp);
    if (addr.getMode() == Address::base_plus_offset) {
      /* If we get an out-of-range offset it is a bug in the compiler,
         so we assert here. */
      assert(Address::offset_ok_for_immed(addr.offset(), exact_log2(size_in_memory)),
             "c2 compiler bug");
      /* Fix up any out-of-range offsets. */
      assert_different_registers(rscratch1, base);
      assert_different_registers(rscratch1, reg);
      addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
    }
    (masm.*insn)(reg, addr);
  }
 2538 
  // FloatRegister variant of loadStore above; selects the index
  // extension inline instead of going through mem2address.
  static void loadStore(C2_MacroAssembler masm, mem_float_insn insn,
                        FloatRegister reg, int opcode,
                        Register base, int index, int size, int disp,
                        int size_in_memory)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      /* If we get an out-of-range offset it is a bug in the compiler,
         so we assert here. */
      assert(Address::offset_ok_for_immed(disp, exact_log2(size_in_memory)), "c2 compiler bug");
      /* Fix up any out-of-range offsets. */
      assert_different_registers(rscratch1, base);
      Address addr = Address(base, disp);
      addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
      (masm.*insn)(reg, addr);
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
 2569 
 2570   static void loadStore(C2_MacroAssembler masm, mem_vector_insn insn,
 2571                         FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
 2572                         int opcode, Register base, int index, int size, int disp)
 2573   {
 2574     if (index == -1) {
 2575       (masm.*insn)(reg, T, Address(base, disp));
 2576     } else {
 2577       assert(disp == 0, "unsupported address mode");
 2578       (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
 2579     }
 2580   }
 2581 
 2582 %}
 2583 
 2584 
 2585 
 2586 //----------ENCODING BLOCK-----------------------------------------------------
 2587 // This block specifies the encoding classes used by the compiler to
 2588 // output byte streams.  Encoding classes are parameterized macros
 2589 // used by Machine Instruction Nodes in order to generate the bit
 2590 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  Four interfaces are
// currently supported: REG_INTER, CONST_INTER, MEMORY_INTER, and
// COND_INTER.  REG_INTER causes an operand to generate a function
 2594 // which returns its register number when queried.  CONST_INTER causes
 2595 // an operand to generate a function which returns the value of the
 2596 // constant when queried.  MEMORY_INTER causes an operand to generate
 2597 // four functions which return the Base Register, the Index Register,
 2598 // the Scale Value, and the Offset Value of the operand when queried.
 2599 // COND_INTER causes an operand to generate six functions which return
 2600 // the encoding code (ie - encoding bits for the instruction)
 2601 // associated with each basic boolean condition for a conditional
 2602 // instruction.
 2603 //
 2604 // Instructions specify two basic values for encoding.  Again, a
 2605 // function is available to check if the constant displacement is an
 2606 // oop. They use the ins_encode keyword to specify their encoding
 2607 // classes (which must be a sequence of enc_class names, and their
 2608 // parameters, specified in the encoding block), and they use the
 2609 // opcode keyword to specify, in order, their primary, secondary, and
 2610 // tertiary opcode.  Only the opcode sections which a particular
 2611 // instruction needs for encoding need to be specified.
 2612 encode %{
 2613   // Build emit functions for each basic byte or larger field in the
 2614   // intel encoding scheme (opcode, rm, sib, immediate), and call them
 2615   // from C++ code in the enc_class source block.  Emit functions will
 2616   // live in the main source block for now.  In future, we can
 2617   // generalize this by adding a syntax that specifies the sizes of
 2618   // fields in an order, so that the adlc can build the emit functions
 2619   // automagically
 2620 
  // catch all for unimplemented encodings
  enc_class enc_unimplemented %{
    C2_MacroAssembler _masm(&cbuf);
    // Fail loudly if an instruction without a real encoding is ever
    // selected, instead of silently emitting nothing.
    __ unimplemented("C2 catch all");
  %}
 2626 
  // BEGIN Non-volatile memory access
  //
  // NOTE(review): the enc_class definitions in this section are
  // produced by ad_encode.m4 — regenerate from the m4 source rather
  // than editing them in place.  Each class forwards to one of the
  // loadStore() helpers above; the final integer argument is the
  // access size in bytes, used to legitimize out-of-range immediate
  // offsets.
  // NOTE(review): aarch64_enc_strw_immn takes a memory1 operand but
  // passes size 4 to loadStore, and aarch64_enc_strb0_ordered takes
  // memory4 but stores 1 byte — presumably intentional in the m4
  // source; confirm before touching.

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsb(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrb(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrb(iRegL dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrshw(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsh(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrh(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrh(iRegL dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrw(iRegI dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrw(iRegL dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsw(iRegL dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldr(iRegL dst, memory8 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrs(vRegF dst, memory4 mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrd(vRegD dst, memory8 mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strb(iRegI src, memory1 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strb0(memory1 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strh(iRegI src, memory2 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strh0(memory2 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw(iRegI src, memory4 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw0(memory4 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_str(iRegL src, memory8 mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      C2_MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_str0(memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strs(vRegF src, memory4 mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strd(vRegD src, memory8 mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw_immn(immN src, memory1 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    address con = (address)$src$$constant;
    // need to do this the hard way until we can manage relocs
    // for 32 bit constants
    __ movoop(rscratch2, (jobject)con);
    if (con) __ encode_heap_oop_not_null(rscratch2);
    loadStore(_masm, &MacroAssembler::strw, rscratch2, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw_immnk(immN src, memory4 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    address con = (address)$src$$constant;
    // need to do this the hard way until we can manage relocs
    // for 32 bit constants
    __ movoop(rscratch2, (jobject)con);
    __ encode_klass_not_null(rscratch2);
    loadStore(_masm, &MacroAssembler::strw, rscratch2, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
      C2_MacroAssembler _masm(&cbuf);
      __ membar(Assembler::StoreStore);
      loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // END Non-volatile memory access
 2865 
  // Vector loads and stores
  //
  // Each class forwards to the vector loadStore() helper; the SIMD
  // register variant (H/S/D/Q) selects the access width.
  enc_class aarch64_enc_ldrvH(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvH(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
 2914 
  // volatile loads and stores
  //
  // NOTE(review): MOV_VOLATILE is defined outside this chunk; it
  // appears to form the address (via the scratch register when
  // needed) and emit the named acquire/release instruction — confirm
  // against its definition before relying on this description.

  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}


  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    // ldarb zero-extends, so sign-extend the loaded byte to 32 bits.
    __ sxtbw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    // ldarb zero-extends, so sign-extend the loaded byte to 64 bits.
    __ sxtb(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    // ldarh zero-extends, so sign-extend the loaded halfword to 32 bits.
    __ sxthw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    // ldarh zero-extends, so sign-extend the loaded halfword to 64 bits.
    __ sxth(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // Acquiring float/double loads go through an integer scratch
  // register and are then moved into the FP register.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      C2_MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // Releasing float/double stores move the FP value into an integer
  // scratch register first, then store-release from there.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      C2_MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      C2_MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
 3041 
  // synchronized read/update encodings
  //
  // ldaxr/stlxr take a register-only address, so any index/displacement
  // must first be folded into a scratch register with lea.

  enc_class aarch64_enc_ldaxr(iRegL dst, memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // base + disp + scaled index needs two lea steps.
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}

  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // base + disp + scaled index needs two lea steps.
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    // stlxr writes 0 to rscratch1 on success; compare against zero so
    // the flags reflect whether the store-exclusive succeeded.
    __ cmpw(rscratch1, zr);
  %}
 3102 
  // Compare-and-swap encodings.  The memory operand must be a bare
  // base register (no index, no displacement) — guaranteed below.
  // These plain variants use /*acquire*/ false.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}


  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3171 
  // auxiliary used for CompareAndSwapX to set result register
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    C2_MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    // res = 1 if the flags say EQ (e.g. the preceding CAS succeeded),
    // otherwise 0.
    __ cset(res_reg, Assembler::EQ);
  %}
 3178 
  // prefetch encodings

  // Emit a PRFM (prefetch-for-store, PSTL1KEEP) for the memory operand,
  // folding base+disp into a scratch register when the operand also has
  // a scaled index.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    C2_MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        __ lea(rscratch1, Address(base, disp));
	__ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
 3199 
  // mov encodings
 3201 
  // Load a 32-bit immediate into a register.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    C2_MacroAssembler _masm(&cbuf);
    u_int32_t con = (u_int32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      // Zero comes straight from the zero register.
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}
 3212 
  // Load a 64-bit immediate into a register.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    u_int64_t con = (u_int64_t)$src$$constant;
    if (con == 0) {
      // Zero comes straight from the zero register.
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}
 3223 
  // Load a pointer constant, choosing the emission by relocation type:
  // oops and metadata need relocation records; plain addresses are either
  // small constants (below the first page) or materialized via adrp+add.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      // Null and one are matched by dedicated operands (immP0/immP_1) below.
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          // Small constant: a plain immediate move suffices.
          __ mov(dst_reg, con);
        } else {
          // Page-relative materialization: adrp gives the page, the add
          // folds in the in-page offset returned through 'offset'.
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
 3248 
 3249   enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
 3250     C2_MacroAssembler _masm(&cbuf);
 3251     Register dst_reg = as_Register($dst$$reg);
 3252     __ mov(dst_reg, zr);
 3253   %}
 3254 
 3255   enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
 3256     C2_MacroAssembler _masm(&cbuf);
 3257     Register dst_reg = as_Register($dst$$reg);
 3258     __ mov(dst_reg, (u_int64_t)1);
 3259   %}
 3260 
  // Load the card-table byte-map base via the dedicated macro-assembler
  // helper (which knows how the base is materialized for this VM).
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    C2_MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}
 3265 
  // Load a narrow (compressed) oop constant with its oop relocation.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      // Narrow null is matched by the immN0 operand below.
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}
 3278 
 3279   enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
 3280     C2_MacroAssembler _masm(&cbuf);
 3281     Register dst_reg = as_Register($dst$$reg);
 3282     __ mov(dst_reg, zr);
 3283   %}
 3284 
  // Load a narrow (compressed) klass constant with its metadata relocation.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      // A null klass constant should never be matched here.
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
 3297 
 3298   // arithmetic encodings
 3299 
  // 32-bit add/subtract of an immediate. One encoding serves both ideal ops:
  // the instruct's 'primary' opcode selects subtract by negating the
  // constant, and the sign of the (possibly negated) constant picks the
  // addw/subw form so the emitted immediate is always non-negative.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}
 3313 
  // 64-bit add/subtract of an immediate; same primary-opcode trick as the
  // 32-bit variant above. Note the constant is deliberately narrowed to
  // int32_t: immLAddSub supplies add/sub-range immediates.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
 3327 
 3328   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
 3329     C2_MacroAssembler _masm(&cbuf);
 3330    Register dst_reg = as_Register($dst$$reg);
 3331    Register src1_reg = as_Register($src1$$reg);
 3332    Register src2_reg = as_Register($src2$$reg);
 3333     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
 3334   %}
 3335 
 3336   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
 3337     C2_MacroAssembler _masm(&cbuf);
 3338    Register dst_reg = as_Register($dst$$reg);
 3339    Register src1_reg = as_Register($src1$$reg);
 3340    Register src2_reg = as_Register($src2$$reg);
 3341     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
 3342   %}
 3343 
 3344   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
 3345     C2_MacroAssembler _masm(&cbuf);
 3346    Register dst_reg = as_Register($dst$$reg);
 3347    Register src1_reg = as_Register($src1$$reg);
 3348    Register src2_reg = as_Register($src2$$reg);
 3349     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
 3350   %}
 3351 
 3352   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
 3353     C2_MacroAssembler _masm(&cbuf);
 3354    Register dst_reg = as_Register($dst$$reg);
 3355    Register src1_reg = as_Register($src1$$reg);
 3356    Register src2_reg = as_Register($src2$$reg);
 3357     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
 3358   %}
 3359 
 3360   // compare instruction encodings
 3361 
 3362   enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
 3363     C2_MacroAssembler _masm(&cbuf);
 3364     Register reg1 = as_Register($src1$$reg);
 3365     Register reg2 = as_Register($src2$$reg);
 3366     __ cmpw(reg1, reg2);
 3367   %}
 3368 
  // 32-bit compare against an add/sub-range immediate, encoded as
  // subs/adds with zr as destination so only the flags are produced.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      // Negative immediate: flip to the adds form with the negated value.
      // Assumes immIAddSub excludes a value whose negation overflows —
      // the operand predicate is defined elsewhere; confirm there.
      __ addsw(zr, reg, -val);
    }
  %}
 3379 
  // 32-bit compare against an arbitrary immediate: materialize the
  // constant in the scratch register, then do a register compare.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int32_t val = (u_int32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}
 3387 
 3388   enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
 3389     C2_MacroAssembler _masm(&cbuf);
 3390     Register reg1 = as_Register($src1$$reg);
 3391     Register reg2 = as_Register($src2$$reg);
 3392     __ cmp(reg1, reg2);
 3393   %}
 3394 
  // 64-bit compare against a 12-bit add/sub immediate, using subs/adds
  // with zr as destination so only the flags are produced.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
      // aargh, Long.MIN_VALUE is a special case: -val == val, so
      // materialize the constant in a scratch register and compare with it.
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}
 3409 
  // 64-bit compare against an arbitrary immediate: materialize the
  // constant in the scratch register, then do a register compare.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int64_t val = (u_int64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}
 3417 
 3418   enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
 3419     C2_MacroAssembler _masm(&cbuf);
 3420     Register reg1 = as_Register($src1$$reg);
 3421     Register reg2 = as_Register($src2$$reg);
 3422     __ cmp(reg1, reg2);
 3423   %}
 3424 
 3425   enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
 3426     C2_MacroAssembler _masm(&cbuf);
 3427     Register reg1 = as_Register($src1$$reg);
 3428     Register reg2 = as_Register($src2$$reg);
 3429     __ cmpw(reg1, reg2);
 3430   %}
 3431 
 3432   enc_class aarch64_enc_testp(iRegP src) %{
 3433     C2_MacroAssembler _masm(&cbuf);
 3434     Register reg = as_Register($src$$reg);
 3435     __ cmp(reg, zr);
 3436   %}
 3437 
 3438   enc_class aarch64_enc_testn(iRegN src) %{
 3439     C2_MacroAssembler _masm(&cbuf);
 3440     Register reg = as_Register($src$$reg);
 3441     __ cmpw(reg, zr);
 3442   %}
 3443 
 3444   enc_class aarch64_enc_b(label lbl) %{
 3445     C2_MacroAssembler _masm(&cbuf);
 3446     Label *L = $lbl$$label;
 3447     __ b(*L);
 3448   %}
 3449 
 3450   enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
 3451     C2_MacroAssembler _masm(&cbuf);
 3452     Label *L = $lbl$$label;
 3453     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
 3454   %}
 3455 
 3456   enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
 3457     C2_MacroAssembler _masm(&cbuf);
 3458     Label *L = $lbl$$label;
 3459     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
 3460   %}
 3461 
  // Slow-path subtype check (walk of the secondary supers array).
  // The instruct's 'primary' opcode selects whether to also zero the
  // result register on a hit.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     C2_MacroAssembler _masm(&cbuf);
     // Only the miss label is supplied; on success execution falls through.
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
 3479 
  // Static Java call. Runtime-wrapper calls (no _method) get a plain
  // runtime-call relocation; real Java targets get an opt-virtual or
  // static-call relocation plus a to-interpreter stub. Either emission
  // can fail when the code cache is full, in which case compilation is
  // bailed out via record_failure.
  enc_class aarch64_enc_java_static_call(method meth) %{
    C2_MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
 3506 
  // Dynamic (inline-cache) Java call; bails out the compile if the
  // code cache is full.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    C2_MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
 3516 
  // Post-call epilogue. The VerifyStackAtCalls check is not implemented
  // on this port — it traps via call_Unimplemented if enabled.
  enc_class aarch64_enc_call_epilog() %{
    C2_MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
 3524 
  // Call from compiled Java into the runtime (e.g. arraycopy stubs).
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    C2_MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blr
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // In-code-cache target: reachable via a (trampolined) direct call.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Out-of-cache target: indirect call through rscratch1, with the
      // return address pushed so the stack walker can find the last
      // Java pc.
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blr(rscratch1);
      __ bind(retaddr);
      // Pop the breadcrumb pair pushed above.
      __ add(sp, sp, 2 * wordSize);
    }
  %}
 3551 
  // Jump to the shared rethrow stub (exception oop already set up).
  enc_class aarch64_enc_rethrow() %{
    C2_MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
 3556 
  // Method return through the link register.
  enc_class aarch64_enc_ret() %{
    C2_MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}
 3561 
 3562   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
 3563     C2_MacroAssembler _masm(&cbuf);
 3564     Register target_reg = as_Register($jump_target$$reg);
 3565     __ br(target_reg);
 3566   %}
 3567 
  // Tail jump used for exception forwarding: hands the return address
  // to the callee in r3, then jumps.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    C2_MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
 3577 
  // Fast-path monitor enter. Attempts biased locking (if enabled), then a
  // stack-lock CAS, then recursive stack-lock detection, and finally a CAS
  // on the inflated monitor's owner field. On exit: flags == EQ means the
  // lock was acquired, NE means the caller must take the slow path.
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markWord from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Check for existing monitor
    __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);

    // Set tmp to be (markWord of object | UNLOCK_VALUE).
    __ orr(tmp, disp_hdr, markWord::unlocked_value);

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markWord with an unlocked value (tmp) and if
    // equal exchange the stack address of our box with object markWord.
    // On failure disp_hdr contains the possibly locked markWord.
    __ cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
               /*release*/ true, /*weak*/ false, disp_hdr);
    __ br(Assembler::EQ, cont);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object and have now locked it; we continue at label cont.

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markWord of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);   // Sets flags for result
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    __ b(cont);

    // Handle existing monitor.
    __ bind(object_has_monitor);

    // The object's monitor m is unlocked iff m->owner == NULL,
    // otherwise m->owner may contain a thread or a stack address.
    //
    // Try to CAS m->owner from NULL to current thread.
    __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markWord::monitor_value));
    __ cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
               /*release*/ true, /*weak*/ false, noreg); // Sets flags for result
    
    // Store a non-null value into the box to avoid looking like a re-entrant
    // lock. The fast-path monitor unlock code checks for
    // markWord::monitor_value so use markWord::unused_mark which has the
    // relevant bit set, and also matches ObjectSynchronizer::enter.
    __ mov(tmp, (address)markWord::unused_mark().value());
    __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
 3655 
  // Fast-path monitor exit: undo biased locking (if enabled), handle the
  // recursive stack-lock case, CAS the displaced header back into the
  // object's markWord, or release an inflated monitor we own. On exit:
  // flags == EQ means unlocked, NE means the caller must take the slow path.
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;

    assert_different_registers(oop, box, tmp, disp_hdr);

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);

    // Handle existing monitor.
    __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
    __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markWord of the
    // object.

    __ cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
               /*release*/ true, /*weak*/ false, tmp);
    __ b(cont);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // Handle existing monitor.
    __ bind(object_has_monitor);
    STATIC_ASSERT(markWord::monitor_value <= INT_MAX);
    __ add(tmp, tmp, -(int)markWord::monitor_value); // monitor
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
    __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
    __ cmp(rscratch1, zr); // Sets flags for result
    __ br(Assembler::NE, cont);

    // Owned, non-recursive: check for waiters before releasing.
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
    __ cmp(rscratch1, zr); // Sets flags for result
    __ cbnz(rscratch1, cont);
    // need a release store here
    __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ stlr(zr, tmp); // set unowned

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
 3716 
 3717 %}
 3718 
 3719 //----------FRAME--------------------------------------------------------------
 3720 // Definition of frame structure and management information.
 3721 //
 3722 //  S T A C K   L A Y O U T    Allocators stack-slot number
 3723 //                             |   (to get allocators register number
 3724 //  G  Owned by    |        |  v    add OptoReg::stack0())
 3725 //  r   CALLER     |        |
 3726 //  o     |        +--------+      pad to even-align allocators stack-slot
 3727 //  w     V        |  pad0  |        numbers; owned by CALLER
 3728 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
 3729 //  h     ^        |   in   |  5
 3730 //        |        |  args  |  4   Holes in incoming args owned by SELF
 3731 //  |     |        |        |  3
 3732 //  |     |        +--------+
 3733 //  V     |        | old out|      Empty on Intel, window on Sparc
 3734 //        |    old |preserve|      Must be even aligned.
 3735 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
 3736 //        |        |   in   |  3   area for Intel ret address
 3737 //     Owned by    |preserve|      Empty on Sparc.
 3738 //       SELF      +--------+
 3739 //        |        |  pad2  |  2   pad to align old SP
 3740 //        |        +--------+  1
 3741 //        |        | locks  |  0
 3742 //        |        +--------+----> OptoReg::stack0(), even aligned
 3743 //        |        |  pad1  | 11   pad to align new SP
 3744 //        |        +--------+
 3745 //        |        |        | 10
 3746 //        |        | spills |  9   spills
 3747 //        V        |        |  8   (pad0 slot for callee)
 3748 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
 3749 //        ^        |  out   |  7
 3750 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
 3751 //     Owned by    +--------+
 3752 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
 3753 //        |    new |preserve|      Must be even-aligned.
 3754 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
 3755 //        |        |        |
 3756 //
 3757 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
 3758 //         known from SELF's arguments and the Java calling convention.
 3759 //         Region 6-7 is determined per call site.
// Note 2: If the calling convention leaves holes in the incoming argument
//         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
//         incoming area, as the Java calling convention is completely under
//         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
//         varargs C calling conventions.
 3767 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
 3768 //         even aligned with pad0 as needed.
 3769 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
 3770 //           (the latter is true on Intel but is it false on AArch64?)
 3771 //         region 6-11 is even aligned; it may be padded out more so that
 3772 //         the region from SP to FP meets the minimum stack alignment.
 3773 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
 3774 //         alignment.  Region 11, pad1, may be dynamically extended so that
 3775 //         SP meets the minimum alignment.
 3776 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  // NOTE(review): R31 names the SP entry in this AD file's register
  // definitions — confirm against the register block at the top of the file.
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low halves of return registers, indexed by ideal register type.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High halves; OptoReg::Bad marks single-slot (32-bit) values.
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
 3880 
 3881 //----------ATTRIBUTES---------------------------------------------------------
 3882 //----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute (default operand cost)

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
 3898 
 3899 //----------OPERANDS-----------------------------------------------------------
 3900 // Operand definitions must precede instruction definitions for correct parsing
 3901 // in the ADLC because operands constitute user defined types which are used in
 3902 // instruction definitions.
 3903 
 3904 //----------Simple Operands----------------------------------------------------
 3905 
 3906 // Integer operands 32 bit
 3907 // 32 bit immediate
operand immI()
%{
  // Matches any 32-bit integer constant (no predicate restriction).
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 3916 
 3917 // 32 bit zero
operand immI0()
%{
  // Only the integer constant zero.
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 3927 
 3928 // 32 bit unit increment
operand immI_1()
%{
  // Only the integer constant one.
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 3938 
 3939 // 32 bit unit decrement
operand immI_M1()
%{
  // Only the integer constant minus one.
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 3949 
 3950 // Shift values for add/sub extension shift
operand immIExt()
%{
  // Extension shift amounts 0..4 accepted by extended add/sub forms.
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 3960 
 3961 operand immI_le_4()
 3962 %{
 3963   predicate(n->get_int() <= 4);
 3964   match(ConI);
 3965 
 3966   op_cost(0);
 3967   format %{ %}
 3968   interface(CONST_INTER);
 3969 %}
 3970 
 3971 operand immI_31()
 3972 %{
 3973   predicate(n->get_int() == 31);
 3974   match(ConI);
 3975 
 3976   op_cost(0);
 3977   format %{ %}
 3978   interface(CONST_INTER);
 3979 %}
 3980 
 3981 operand immI_2()
 3982 %{
 3983   predicate(n->get_int() == 2);
 3984   match(ConI);
 3985 
 3986   op_cost(0);
 3987   format %{ %}
 3988   interface(CONST_INTER);
 3989 %}
 3990 
 3991 operand immI_4()
 3992 %{
 3993   predicate(n->get_int() == 4);
 3994   match(ConI);
 3995 
 3996   op_cost(0);
 3997   format %{ %}
 3998   interface(CONST_INTER);
 3999 %}
 4000 
// 32 bit integer constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 63
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 255 (low byte mask)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 65535 (low halfword mask)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4100 
// 64 bit integer constant 255 (low byte mask)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer constant 65535 (low halfword mask)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer constant 4294967295 (low word mask)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4130 
 4131 operand immL_bitmask()
 4132 %{
 4133   predicate((n->get_long() != 0)
 4134             && ((n->get_long() & 0xc000000000000000l) == 0)
 4135             && is_power_of_2(n->get_long() + 1));
 4136   match(ConL);
 4137 
 4138   op_cost(0);
 4139   format %{ %}
 4140   interface(CONST_INTER);
 4141 %}
 4142 
// Int immediate: a non-zero mask of contiguous low-order one bits
// (value + 1 is a power of 2), with the top two bits required clear.
operand immI_bitmask()
%{
  predicate((n->get_int() != 0)
            && ((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4154 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset (long variant) -- for base plus immediate loads
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4208 
// Offset for scaled or unscaled immediate loads and stores
// The second argument of offset_ok_for_immed is the log2 of the
// element size being accessed (0 = byte ... 4 = quadword).
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 1 byte access (element size shift 0)
operand immIOffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 2 byte access (element size shift 1)
operand immIOffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 4 byte access (element size shift 2)
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an 8 byte access (element size shift 3)
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 16 byte access (element size shift 4)
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4269 
// Long variants of the offset operands above; same element-size-shift
// convention in the second argument of offset_ok_for_immed.
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 1 byte access (element size shift 0)
operand immLoffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 2 byte access (element size shift 1)
operand immLoffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 4 byte access (element size shift 2)
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an 8 byte access (element size shift 3)
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 16 byte access (element size shift 4)
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4329 
// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4351 
// Integer operands 64 bit
// 64 bit immediate
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4395 
// 32 bit offset of pc in thread anchor
// Matches only the constant byte offset of last_Java_pc within the
// JavaThread's frame anchor.
operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4438 
// Pointer operands
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
// Matches only when a card table barrier set is active and the constant
// equals its byte_map_base, so the base can be materialized specially.
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            (CardTable::CardValue*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
// NOTE(review): comment duplicated from immP_M1 -- confirm the intended
// distinct use of the -2 sentinel.
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4509 
// Float and Double operands
// Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: encodable as an fmov (packed) immediate
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: encodable as an fmov (packed) immediate
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4570 
// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4601 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4623 
 4624 // Integer 64 bit Register Operands
 4625 // Integer 64 bit Register (includes SP)
 4626 operand iRegL()
 4627 %{
 4628   constraint(ALLOC_IN_RC(any_reg));
 4629   match(RegL);
 4630   match(iRegLNoSp);
 4631   op_cost(0);
 4632   format %{ %}
 4633   interface(REG_INTER);
 4634 %}
 4635 
 4636 // Integer 64 bit Register not Special
 4637 operand iRegLNoSp()
 4638 %{
 4639   constraint(ALLOC_IN_RC(no_special_reg));
 4640   match(RegL);
 4641   match(iRegL_R0);
 4642   format %{ %}
 4643   interface(REG_INTER);
 4644 %}
 4645 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4678 
// Pointer register operands pinned to a single general register;
// used where an instruction or calling convention requires that register.

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4762 
// Long register operands pinned to a single general register.

// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4817 
// Integer register operands pinned to a single general register.

// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4862 

// Narrow Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R0 only
operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R2 only
operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R3 only
operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer 32 bit Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4923 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64 bit vector register operand
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128 bit vector register operand
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4967 
// Double register operands each constrained to a single FP/SIMD
// register class (v0_reg .. v31_reg); used where a fixed V register
// is required.
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V4()
%{
  constraint(ALLOC_IN_RC(v4_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V5()
%{
  constraint(ALLOC_IN_RC(v5_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V6()
%{
  constraint(ALLOC_IN_RC(v6_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V7()
%{
  constraint(ALLOC_IN_RC(v7_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V8()
%{
  constraint(ALLOC_IN_RC(v8_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V9()
%{
  constraint(ALLOC_IN_RC(v9_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V10()
%{
  constraint(ALLOC_IN_RC(v10_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V11()
%{
  constraint(ALLOC_IN_RC(v11_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V12()
%{
  constraint(ALLOC_IN_RC(v12_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V13()
%{
  constraint(ALLOC_IN_RC(v13_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V14()
%{
  constraint(ALLOC_IN_RC(v14_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V15()
%{
  constraint(ALLOC_IN_RC(v15_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V16()
%{
  constraint(ALLOC_IN_RC(v16_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V17()
%{
  constraint(ALLOC_IN_RC(v17_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V18()
%{
  constraint(ALLOC_IN_RC(v18_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V19()
%{
  constraint(ALLOC_IN_RC(v19_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V20()
%{
  constraint(ALLOC_IN_RC(v20_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V21()
%{
  constraint(ALLOC_IN_RC(v21_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V22()
%{
  constraint(ALLOC_IN_RC(v22_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V23()
%{
  constraint(ALLOC_IN_RC(v23_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V24()
%{
  constraint(ALLOC_IN_RC(v24_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V25()
%{
  constraint(ALLOC_IN_RC(v25_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V26()
%{
  constraint(ALLOC_IN_RC(v26_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V27()
%{
  constraint(ALLOC_IN_RC(v27_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V28()
%{
  constraint(ALLOC_IN_RC(v28_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V29()
%{
  constraint(ALLOC_IN_RC(v29_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V30()
%{
  constraint(ALLOC_IN_RC(v30_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V31()
%{
  constraint(ALLOC_IN_RC(v31_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5255 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
 5295 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5337 
//----------Memory Operands----------------------------------------------------

// [reg] -- base register only, no index or displacement
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg + (sign-extended int index) << scale]
// predicate checks that every memory use of the AddP tolerates the scale
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// [reg + (long index) << scale]
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [reg + sign-extended int index]
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg + long index]
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
 5411 
// Base plus immediate (int) byte offset: [$reg, #$off].  The immIOffset*
// operand flavours below (defined earlier in the file) restrict the
// immediate to offsets encodable for the named access size in bytes.
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffI, for 1-byte accesses
operand indOffI1(iRegP reg, immIOffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffI, for 2-byte accesses
operand indOffI2(iRegP reg, immIOffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffI, for 4-byte accesses
operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffI, for 8-byte accesses
operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffI, for 16-byte accesses
operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5495 
// Base plus immediate (long) byte offset: [$reg, #$off].  Long-typed
// counterparts of the indOffI* operands; the immLoffset* flavours restrict
// the immediate to offsets encodable for the named access size in bytes.
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffL, for 1-byte accesses
operand indOffL1(iRegP reg, immLoffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffL, for 2-byte accesses
operand indOffL2(iRegP reg, immLoffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffL, for 4-byte accesses
operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffL, for 8-byte accesses
operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As indOffL, for 16-byte accesses
operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5579 
// Narrow-oop (compressed pointer) variants of the memory operands above.
// All are guarded on CompressedOops::shift() == 0, i.e. the case where a
// DecodeN'd narrow register can feed the address base directly without an
// explicit shift.

operand indirectN(iRegN reg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow base plus sign-extended, scaled 32-bit index (cf. indIndexScaledI2L)
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// Narrow base plus scaled 64-bit index (cf. indIndexScaled)
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// Narrow base plus sign-extended 32-bit index, unscaled (cf. indIndexI2L)
operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow base plus 64-bit index, unscaled (cf. indIndex)
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow base plus immediate int offset (cf. indOffI)
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Narrow base plus immediate long offset (cf. indOffL)
operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5684 
 5685 
 5686 
// AArch64 opto stubs need to write to the pc slot in the thread anchor.
// Address operand: [thread_reg, #immL_pc_off] where immL_pc_off (defined
// elsewhere in this file) is the fixed offset of the anchor's pc slot.
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5701 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.

// Pointer stack slot.
// NOTE(review): only stackSlotP carries op_cost(100); the other stackSlot*
// operands rely on the default cost — confirm this asymmetry is intentional.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Int stack slot
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Float stack slot
operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Double stack slot
operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Long stack slot
operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
 5776 
// Operands for expressing Control Flow
// NOTE: Label is a predefined operand which should not be redefined in
//       the AD file. It is generically handled within the ADLC.

//----------Conditional Branch Operands----------------------------------------
// Comparison Op  - This is the operation of the comparison, and is limited to
//                  the following set of codes:
//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.
//
// The hex values in the COND_INTER entries are AArch64 condition-code
// encodings; the strings are the assembler mnemonics.

// used for signed integral comparisons and fp comparisons

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons
// (lo/hs/ls/hi replace the signed lt/ge/le/gt conditions)

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 5832 
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

// Restricted to eq/ne tests via the predicate; the full condition table is
// still supplied because COND_INTER requires every entry.
operand cmpOpEqNe()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

// As cmpOpEqNe, but restricted to lt/ge tests.
operand cmpOpLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 5879 
// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions

// Restricted to eq/ne/lt/ge tests on unsigned comparisons.
operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 5905 
 5906 // Special operand allowing long args to int ops to be truncated for free
 5907 
 5908 operand iRegL2I(iRegL reg) %{
 5909 
 5910   op_cost(0);
 5911 
 5912   match(ConvL2I reg);
 5913 
 5914   format %{ "l2i($reg)" %}
 5915 
 5916   interface(REG_INTER)
 5917 %}
 5918 
// Vector memory operand classes, one per access size in bytes.
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);

//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.

// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address

// memory1/2/4/8 differ only in which immediate-offset operands they admit
// (scaled to the access size).  Note that memory1 and memory2 omit the
// narrow-oop immediate-offset forms (indOffIN/indOffLN) which memory4 and
// memory8 include.

opclass memory1(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI1, indOffL1,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN);

opclass memory2(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI2, indOffL2,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN);

opclass memory4(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI4, indOffL4,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

opclass memory8(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

// All of the memory operands. For the pipeline description.
opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex,
               indOffI1, indOffL1, indOffI2, indOffL2, indOffI4, indOffL4, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);


// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but its not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
 5965 
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architecture's pipeline.

// For specific pipelines, eg A53, define the stages of that pipeline
//pipe_desc(ISS, EX1, EX2, WR);
// Map the A53-style stage names used by the pipe_class definitions onto the
// generic 6-stage pipe_desc(S0..S5) declared below.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
 5975 
 5976 // Integer ALU reg operation
 5977 pipeline %{
 5978 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
 5991 
// We don't use an actual pipeline model so don't care about resources
// or description. We do use pipeline classes to introduce fixed
// latencies

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

// INS01 means "either issue slot"; INS0 alone models instructions that can
// only issue in slot 0.  Likewise ALU is "either ALU".
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);
 6012 
//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

// FP dyadic op, single precision: both sources read early, result in S5
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP dyadic op, double precision
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, single precision
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, double precision
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double -> float
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert float -> double
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
 6072 
// FP convert float -> int
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert float -> long
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert int -> float
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert long -> float
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double -> int
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double -> long
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert int -> double
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert long -> double
// NOTE(review): src is iRegIorL2I although this is a long conversion —
// compare fp_l2f which uses iRegL; confirm this is intended.
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
 6144 
// FP divide, single precision — issue slot 0 only (INS0)
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision — issue slot 0 only
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP conditional select, single precision: reads flags and both sources
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate move, single precision
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate move, double precision
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant load, single precision
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// FP constant load, double precision
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
 6218 
// Vector multiply, 64-bit (vecD) — dual-issue (INS01)
pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply, 128-bit (vecX) — issue slot 0 only (INS0)
pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 64-bit: dst is also read (accumulator)
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 128-bit
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector integer dyadic op, 64-bit
pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

// Vector integer dyadic op, 128-bit
pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}

// Vector logical op, 64-bit
pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector logical op, 128-bit
pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}
 6300 
// Vector shift by register, 64-bit (shift amount lives in a vecX register)
pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by register, 128-bit
pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 64-bit (immediate needs no read stage)
pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 128-bit
pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector FP dyadic op, 64-bit
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP dyadic op, 128-bit
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
 6358 
// Vector FP multiply/divide, 64-bit — issue slot 0 only
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 128-bit
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP square root, 128-bit
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 64-bit
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 128-bit
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
 6405 
// Vector duplicate from general register, 64-bit
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector duplicate from general register, 128-bit
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector duplicate from float register, 64-bit
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector duplicate from float register, 128-bit
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector duplicate from double register, 128-bit
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector immediate move (movi), 64-bit
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector immediate move (movi), 128-bit — issue slot 0 only
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}
 6466 
// Vector load (64-bit), mem -> reg; address consumed at issue
pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector load (128-bit), mem -> reg
pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector store (64-bit), reg -> mem
pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
 6493 
 6494 pipe_class vstore_reg_mem128(vecD src, vmem16 mem)
 6495 %{
 6496   single_instruction;
 6497   mem    : ISS(read);
 6498   src    : S2(read);
 6499   INS01  : ISS;
 6500   NEON_FP : S3;
 6501 %}
 6502 
 6503 //------- Integer ALU operations --------------------------
 6504 
// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}
 6517 
// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);  // shifted operand needed earlier than src1
  INS01  : ISS;
  ALU    : EX2;
%}
 6530 
// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);  // shifted operand needed at issue
  INS01  : ISS;
  ALU    : EX2;
%}
 6541 
// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}
 6555 
// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1; // NOTE(review): ALU stage EX1 while dst writes at EX2 - confirm intended
%}
 6568 
// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
 6579 
// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
 6590 
// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);  // no source to wait for, result ready in EX1
  INS01  : ISS;
  ALU    : EX1;
%}
 6600 
 6601 //------- Compare operation -------------------------------
 6602 
// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);  // flags written in EX2
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
 6615 
// Compare reg-imm
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
 6627 
 6628 //------- Conditional instructions ------------------------
 6629 
// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);   // flags consumed in EX1
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
 6640 
// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
 6653 
// Conditional 1 operand (second operand implicit, e.g. zr)
// EG.  CSEL    X0, X1, zr, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
 6665 
 6666 //------- Multiply pipeline operations --------------------
 6667 
// Multiply reg-reg (32 bit)
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);   // result produced by the MAC unit at writeback
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
 6679 
// Multiply accumulate (32 bit)
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
 6692 
// Long multiply reg-reg (64 bit)
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
 6704 
// Long multiply accumulate (64 bit)
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
 6718 
 6719 //------- Divide pipeline operations --------------------
 6720 
// Divide reg-reg (32 bit)
// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
 6732 
// Divide reg-reg (64 bit)
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
 6744 
 6745 //------- Load pipeline operations ------------------------
 6746 
// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);   // address operands needed at issue
  INS01  : ISS;
  LDST   : WR;
%}
 6756 
// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
 6767 
// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
 6778 
 6779 //------- Store pipeline operations -----------------------
 6780 
// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
 6790 
// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);   // address operands needed at issue
  src    : EX2(read);   // stored data not needed until EX2
  INS01  : ISS;
  LDST   : WR;
%}
 6801 
// Store - reg, reg
// Eg. STR      x0, [sp, x1]
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);   // address register needed at issue
  src    : EX2(read);   // stored data not needed until EX2
  INS01  : ISS;
  LDST   : WR;
%}
 6812 
//------- Branch pipeline operations ----------------------
 6814 
// Unconditional branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}
 6822 
// Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);   // flags consumed in EX1
  INS01  : ISS;
  BRANCH : EX1;
%}
 6831 
// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
 6841 
 6842 //------- Synchronisation operations ----------------------
 6843 
// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
 6854 
// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);  // conservative estimate for a multi-insn expansion
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
 6865 
// Empty pipeline class - consumes no resources, zero latency
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}
 6872 
// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}
 6879 
// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}
 6886 
// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}
 6893 
// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);  // calls are assumed expensive; discourage scheduling nearby
%}
 6900 
// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;  // nops consume no pipeline resources
%}
 6905 
 6906 %}
 6907 //----------INSTRUCTIONS-------------------------------------------------------
 6908 //
 6909 // match      -- States which machine-independent subtree may be replaced
 6910 //               by this instruction.
 6911 // ins_cost   -- The estimated cost of this instruction is used by instruction
 6912 //               selection to identify a minimum cost tree of machine
 6913 //               instructions that matches a tree of machine-independent
 6914 //               instructions.
 6915 // format     -- A string providing the disassembly for this instruction.
 6916 //               The value of an instruction's operand may be inserted
 6917 //               by referring to it with a '$' prefix.
// opcode     -- Three instruction opcodes may be provided.  These are referred
//               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
//               indicate the type of machine instruction, while secondary
//               and tertiary are often used for prefix options or addressing
//               modes.
 6924 // ins_encode -- A list of encode classes with parameters. The encode class
 6925 //               name must have been defined in an 'enc_class' specification
 6926 //               in the encode section of the architecture description.
 6927 
 6928 // ============================================================================
 6929 // Memory (Load/Store) Instructions
 6930 
 6931 // Load Instructions
 6932 
// Load Byte (8 bit signed)
instruct loadB(iRegINoSp dst, memory1 mem)
%{
  match(Set dst (LoadB mem));
  // Plain load only; acquiring (volatile) loads are matched separately below.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6946 
// Load Byte (8 bit signed) into long
instruct loadB2L(iRegLNoSp dst, memory1 mem)
%{
  // Fold the sign-extension into the load itself.
  match(Set dst (ConvI2L (LoadB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6960 
// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory1 mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6974 
// Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory1 mem)
%{
  // Zero-extending byte load already clears the upper bits; the ConvI2L is free.
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6988 
// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory2 mem)
%{
  match(Set dst (LoadS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7002 
// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory2 mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7016 
// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory2 mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7030 
// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory2 mem)
%{
  // Zero-extending halfword load makes the ConvI2L free.
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7044 
// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory4 mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7058 
// Load Integer (32 bit signed) into long
instruct loadI2L(iRegLNoSp dst, memory4 mem)
%{
  // ldrsw sign-extends to 64 bits, folding the ConvI2L.
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7072 
// Load Integer (32 bit unsigned) into long
instruct loadUI2L(iRegLNoSp dst, memory4 mem, immL_32bits mask)
%{
  // (long)load & 0xFFFFFFFF is exactly a zero-extending 32-bit load.
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7086 
 7087 // Load Long (64 bit signed)
 7088 instruct loadL(iRegLNoSp dst, memory8 mem)
 7089 %{
 7090   match(Set dst (LoadL mem));
 7091   predicate(!needs_acquiring_load(n));
 7092 
 7093   ins_cost(4 * INSN_COST);
 7094   format %{ "ldr  $dst, $mem\t# int" %}
 7095 
 7096   ins_encode(aarch64_enc_ldr(dst, mem));
 7097 
 7098   ins_pipe(iload_reg_mem);
 7099 %}
 7100 
// Load Range (array length)
instruct loadRange(iRegINoSp dst, memory4 mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7113 
// Load Pointer
instruct loadP(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadP mem));
  // Only matches plain loads with no GC barrier data attached.
  predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7127 
// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7141 
// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7155 
// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7169 
// Load Float
instruct loadF(vRegF dst, memory4 mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
 7183 
// Load Double
instruct loadD(vRegD dst, memory8 mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
 7197 
 7198 
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}
 7211 
// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}
 7224 
// Load Pointer Constant

instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  // May expand to a multi-instruction mov sequence, hence the higher cost.
  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}
 7240 
// Load Null Pointer Constant

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
 7254 
 7255 // Load Pointer Constant One
 7256 
 7257 instruct loadConP1(iRegPNoSp dst, immP_1 con)
 7258 %{
 7259   match(Set dst con);
 7260 
 7261   ins_cost(INSN_COST);
 7262   format %{ "mov  $dst, $con\t# NULL ptr" %}
 7263 
 7264   ins_encode(aarch64_enc_mov_p1(dst, con));
 7265 
 7266   ins_pipe(ialu_imm);
 7267 %}
 7268 
// Load Byte Map Base Constant (card table base)
instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}
 7282 
// Load Narrow Pointer Constant

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}
 7296 
// Load Narrow Null Pointer Constant

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}
 7310 
// Load Narrow Klass Constant

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
 7324 
// Load Packed Float Constant
// "Packed" = encodable as an fmov immediate (no constant-table load needed).

instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    // fmovs takes the immediate as a double here; the value is
    // representable exactly, so the widening cast is lossless.
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}
 7337 
// Load Float Constant (general case: load from the constant table)

instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}
 7355 
// Load Packed Double Constant
// "Packed" = encodable as an fmov immediate (no constant-table load needed).

instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
 7368 
 7369 // Load Double Constant
 7370 
 7371 instruct loadConD(vRegD dst, immD con) %{
 7372   match(Set dst con);
 7373 
 7374   ins_cost(INSN_COST * 5);
 7375   format %{
 7376     "ldrd $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
 7377   %}
 7378 
 7379   ins_encode %{
 7380     __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
 7381   %}
 7382 
 7383   ins_pipe(fp_load_constant_d);
 7384 %}
 7385 
 7386 // Store Instructions
 7387 
// Store CMS card-mark Immediate
instruct storeimmCM0(immI0 zero, memory1 mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST);
  format %{ "storestore (elided)\n\t"
            "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
 7401 
// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory1 mem)
%{
  match(Set mem (StoreCM mem zero));

  // Costed higher than storeimmCM0 so the unordered form wins when legal.
  ins_cost(INSN_COST * 2);
  format %{ "storestore\n\t"
            "dmb ishst"
            "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}
 7417 
// Store Byte
instruct storeB(iRegIorL2I src, memory1 mem)
%{
  match(Set mem (StoreB mem src));
  // Plain store only; releasing (volatile) stores are matched separately.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7431 
 7432 
 7433 instruct storeimmB0(immI0 zero, memory1 mem)
 7434 %{
 7435   match(Set mem (StoreB mem zero));
 7436   predicate(!needs_releasing_store(n));
 7437 
 7438   ins_cost(INSN_COST);
 7439   format %{ "strb rscractch2, $mem\t# byte" %}
 7440 
 7441   ins_encode(aarch64_enc_strb0(mem));
 7442 
 7443   ins_pipe(istore_mem);
 7444 %}
 7445 
// Store Char/Short
instruct storeC(iRegIorL2I src, memory2 mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7459 
// Store Char/Short, zero immediate
instruct storeimmC0(immI0 zero, memory2 mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}
 7472 
// Store Integer

instruct storeI(iRegIorL2I src, memory4 mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7487 
// Store Integer, zero immediate
instruct storeimmI0(immI0 zero, memory4 mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
 7500 
 7501 // Store Long (64 bit signed)
 7502 instruct storeL(iRegL src, memory8 mem)
 7503 %{
 7504   match(Set mem (StoreL mem src));
 7505   predicate(!needs_releasing_store(n));
 7506 
 7507   ins_cost(INSN_COST);
 7508   format %{ "str  $src, $mem\t# int" %}
 7509 
 7510   ins_encode(aarch64_enc_str(src, mem));
 7511 
 7512   ins_pipe(istore_reg_mem);
 7513 %}
 7514 
 7515 // Store Long (64 bit signed)
 7516 instruct storeimmL0(immL0 zero, memory8 mem)
 7517 %{
 7518   match(Set mem (StoreL mem zero));
 7519   predicate(!needs_releasing_store(n));
 7520 
 7521   ins_cost(INSN_COST);
 7522   format %{ "str  zr, $mem\t# int" %}
 7523 
 7524   ins_encode(aarch64_enc_str0(mem));
 7525 
 7526   ins_pipe(istore_mem);
 7527 %}
 7528 
// Store Pointer
instruct storeP(iRegP src, memory8 mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7542 
// Store Pointer, null immediate
instruct storeimmP0(immP0 zero, memory8 mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
 7556 
// Store Compressed Pointer
instruct storeN(iRegN src, memory4 mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7570 
// Store Compressed Pointer, null immediate
instruct storeImmN0(immN0 zero, memory4 mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
 7583 
// Store Float
instruct storeF(vRegF src, memory4 mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 7597 
 7598 // TODO
 7599 // implement storeImmF0 and storeFImmPacked
 7600 
// Store Double
instruct storeD(vRegD src, memory8 mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 7614 
// Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory4 mem)
%{
  // Note: predicate precedes match here (reversed relative to the other
  // store rules); ADLC accepts either order.
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7628 
 7629 // TODO
 7630 // implement storeImmD0 and storeDImmPacked
 7631 
 7632 // prefetch instructions
 7633 // Must be safe to execute with invalid address (cannot fault).
 7634 
// Prefetch for allocation.
// Must be safe to execute with an invalid address (cannot fault).
instruct prefetchalloc( memory8 mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
 7645 
 7646 //  ---------------- volatile loads and stores ----------------
 7647 
// Load Byte (8 bit signed), volatile (load-acquire)
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}
 7660 
// Load Byte (8 bit signed) into long, volatile (load-acquire)
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}
 7673 
 7674 // Load Byte (8 bit unsigned)
 7675 instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
 7676 %{
 7677   match(Set dst (LoadUB mem));
 7678 
 7679   ins_cost(VOLATILE_REF_COST);
 7680   format %{ "ldarb  $dst, $mem\t# byte" %}
 7681 
 7682   ins_encode(aarch64_enc_ldarb(dst, mem));
 7683 
 7684   ins_pipe(pipe_serial);
 7685 %}
 7686 
 7687 // Load Byte (8 bit unsigned) into long
 7688 instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7689 %{
 7690   match(Set dst (ConvI2L (LoadUB mem)));
 7691 
 7692   ins_cost(VOLATILE_REF_COST);
 7693   format %{ "ldarb  $dst, $mem\t# byte" %}
 7694 
 7695   ins_encode(aarch64_enc_ldarb(dst, mem));
 7696 
 7697   ins_pipe(pipe_serial);
 7698 %}
 7699 
 7700 // Load Short (16 bit signed)
 7701 instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
 7702 %{
 7703   match(Set dst (LoadS mem));
 7704 
 7705   ins_cost(VOLATILE_REF_COST);
 7706   format %{ "ldarshw  $dst, $mem\t# short" %}
 7707 
 7708   ins_encode(aarch64_enc_ldarshw(dst, mem));
 7709 
 7710   ins_pipe(pipe_serial);
 7711 %}
 7712 
 7713 instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
 7714 %{
 7715   match(Set dst (LoadUS mem));
 7716 
 7717   ins_cost(VOLATILE_REF_COST);
 7718   format %{ "ldarhw  $dst, $mem\t# short" %}
 7719 
 7720   ins_encode(aarch64_enc_ldarhw(dst, mem));
 7721 
 7722   ins_pipe(pipe_serial);
 7723 %}
 7724 
 7725 // Load Short/Char (16 bit unsigned) into long
 7726 instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7727 %{
 7728   match(Set dst (ConvI2L (LoadUS mem)));
 7729 
 7730   ins_cost(VOLATILE_REF_COST);
 7731   format %{ "ldarh  $dst, $mem\t# short" %}
 7732 
 7733   ins_encode(aarch64_enc_ldarh(dst, mem));
 7734 
 7735   ins_pipe(pipe_serial);
 7736 %}
 7737 
 7738 // Load Short/Char (16 bit signed) into long
 7739 instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7740 %{
 7741   match(Set dst (ConvI2L (LoadS mem)));
 7742 
 7743   ins_cost(VOLATILE_REF_COST);
 7744   format %{ "ldarh  $dst, $mem\t# short" %}
 7745 
 7746   ins_encode(aarch64_enc_ldarsh(dst, mem));
 7747 
 7748   ins_pipe(pipe_serial);
 7749 %}
 7750 
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// The immL_32bits mask operand matches the 0xFFFFFFFF AND the ideal graph
// uses to express zero-extension; a 32-bit ldarw zero-extends for free,
// so no extra instruction is needed.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
 7776 
 7777 // Load Long (64 bit signed)
 7778 instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7779 %{
 7780   match(Set dst (LoadL mem));
 7781 
 7782   ins_cost(VOLATILE_REF_COST);
 7783   format %{ "ldar  $dst, $mem\t# int" %}
 7784 
 7785   ins_encode(aarch64_enc_ldar(dst, mem));
 7786 
 7787   ins_pipe(pipe_serial);
 7788 %}
 7789 
// Load Pointer
// Only matches when the load carries no GC barrier data; loads with
// barrier data are presumably matched by collector-specific rules
// elsewhere (not visible in this section).
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));
  predicate(n->as_Load()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
// Acquiring FP load (pseudo-mnemonic "ldars": ldarw into a GP register
// followed by a move to the FP register inside the encoding).
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
 7842 
// Store Byte
// Release (stlr*) forms of the scalar stores begin here.
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}
 7868 
 7869 // Store Integer
 7870 
 7871 instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
 7872 %{
 7873   match(Set mem(StoreI mem src));
 7874 
 7875   ins_cost(VOLATILE_REF_COST);
 7876   format %{ "stlrw  $src, $mem\t# int" %}
 7877 
 7878   ins_encode(aarch64_enc_stlrw(src, mem));
 7879 
 7880   ins_pipe(pipe_class_memory);
 7881 %}
 7882 
 7883 // Store Long (64 bit signed)
 7884 instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
 7885 %{
 7886   match(Set mem (StoreL mem src));
 7887 
 7888   ins_cost(VOLATILE_REF_COST);
 7889   format %{ "stlr  $src, $mem\t# int" %}
 7890 
 7891   ins_encode(aarch64_enc_stlr(src, mem));
 7892 
 7893   ins_pipe(pipe_class_memory);
 7894 %}
 7895 
// Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 7950 
//  ---------------- end of volatile loads and stores ----------------

// Write back (flush) the data cache line containing $addr.  Requires
// hardware support; the asserts enforce plain base-register addressing
// (no index, zero displacement), matching the indirect operand.
instruct cacheWB(indirect addr)
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWB addr);

  ins_cost(100);
  format %{"cache wb $addr" %}
  ins_encode %{
    assert($addr->index_position() < 0, "should be");
    assert($addr$$disp == 0, "should be");
    __ cache_wb(Address($addr$$base$$Register, 0));
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering operation emitted before a run of cache write-backs
// (cache_wbsync(true) selects the pre-sync variant).
instruct cacheWBPreSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPreSync);

  ins_cost(100);
  format %{"cache wb presync" %}
  ins_encode %{
    __ cache_wbsync(true);
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering operation emitted after a run of cache write-backs
// (cache_wbsync(false) selects the post-sync variant).
instruct cacheWBPostSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPostSync);

  ins_cost(100);
  format %{"cache wb postsync" %}
  ins_encode %{
    __ cache_wbsync(false);
  %}
  ins_pipe(pipe_slow); // XXX
%}
 7993 
 7994 // ============================================================================
 7995 // BSWAP Instructions
 7996 
 7997 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
 7998   match(Set dst (ReverseBytesI src));
 7999 
 8000   ins_cost(INSN_COST);
 8001   format %{ "revw  $dst, $src" %}
 8002 
 8003   ins_encode %{
 8004     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
 8005   %}
 8006 
 8007   ins_pipe(ialu_reg);
 8008 %}
 8009 
 8010 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
 8011   match(Set dst (ReverseBytesL src));
 8012 
 8013   ins_cost(INSN_COST);
 8014   format %{ "rev  $dst, $src" %}
 8015 
 8016   ins_encode %{
 8017     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
 8018   %}
 8019 
 8020   ins_pipe(ialu_reg);
 8021 %}
 8022 
 8023 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
 8024   match(Set dst (ReverseBytesUS src));
 8025 
 8026   ins_cost(INSN_COST);
 8027   format %{ "rev16w  $dst, $src" %}
 8028 
 8029   ins_encode %{
 8030     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
 8031   %}
 8032 
 8033   ins_pipe(ialu_reg);
 8034 %}
 8035 
 8036 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
 8037   match(Set dst (ReverseBytesS src));
 8038 
 8039   ins_cost(INSN_COST);
 8040   format %{ "rev16w  $dst, $src\n\t"
 8041             "sbfmw $dst, $dst, #0, #15" %}
 8042 
 8043   ins_encode %{
 8044     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
 8045     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
 8046   %}
 8047 
 8048   ins_pipe(ialu_reg);
 8049 %}
 8050 
 8051 // ============================================================================
 8052 // Zero Count Instructions
 8053 
 8054 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
 8055   match(Set dst (CountLeadingZerosI src));
 8056 
 8057   ins_cost(INSN_COST);
 8058   format %{ "clzw  $dst, $src" %}
 8059   ins_encode %{
 8060     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
 8061   %}
 8062 
 8063   ins_pipe(ialu_reg);
 8064 %}
 8065 
 8066 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
 8067   match(Set dst (CountLeadingZerosL src));
 8068 
 8069   ins_cost(INSN_COST);
 8070   format %{ "clz   $dst, $src" %}
 8071   ins_encode %{
 8072     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
 8073   %}
 8074 
 8075   ins_pipe(ialu_reg);
 8076 %}
 8077 
 8078 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
 8079   match(Set dst (CountTrailingZerosI src));
 8080 
 8081   ins_cost(INSN_COST * 2);
 8082   format %{ "rbitw  $dst, $src\n\t"
 8083             "clzw   $dst, $dst" %}
 8084   ins_encode %{
 8085     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
 8086     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
 8087   %}
 8088 
 8089   ins_pipe(ialu_reg);
 8090 %}
 8091 
 8092 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
 8093   match(Set dst (CountTrailingZerosL src));
 8094 
 8095   ins_cost(INSN_COST * 2);
 8096   format %{ "rbit   $dst, $src\n\t"
 8097             "clz    $dst, $dst" %}
 8098   ins_encode %{
 8099     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
 8100     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
 8101   %}
 8102 
 8103   ins_pipe(ialu_reg);
 8104 %}
 8105 
//---------- Population Count Instructions -------------------------------------
//
// Bit counting is done on the SIMD side: move the value into a vector
// register, cnt counts set bits per byte lane, addv sums the lanes
// horizontally, and the scalar result is moved back.

instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory form: load the int straight into the FP/SIMD register (ldrs)
// and count there, skipping the GP-register round trip.
instruct popCountI_mem(iRegINoSp dst, memory4 mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory form of popCountL: ldrd loads the long directly into the
// FP/SIMD register.
instruct popCountL_mem(iRegINoSp dst, memory8 mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
 8195 
 8196 // ============================================================================
 8197 // MemBar Instruction
 8198 
 8199 instruct load_fence() %{
 8200   match(LoadFence);
 8201   ins_cost(VOLATILE_REF_COST);
 8202 
 8203   format %{ "load_fence" %}
 8204 
 8205   ins_encode %{
 8206     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
 8207   %}
 8208   ins_pipe(pipe_serial);
 8209 %}
 8210 
 8211 instruct unnecessary_membar_acquire() %{
 8212   predicate(unnecessary_acquire(n));
 8213   match(MemBarAcquire);
 8214   ins_cost(0);
 8215 
 8216   format %{ "membar_acquire (elided)" %}
 8217 
 8218   ins_encode %{
 8219     __ block_comment("membar_acquire (elided)");
 8220   %}
 8221 
 8222   ins_pipe(pipe_class_empty);
 8223 %}
 8224 
 8225 instruct membar_acquire() %{
 8226   match(MemBarAcquire);
 8227   ins_cost(VOLATILE_REF_COST);
 8228 
 8229   format %{ "membar_acquire\n\t"
 8230             "dmb ish" %}
 8231 
 8232   ins_encode %{
 8233     __ block_comment("membar_acquire");
 8234     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
 8235   %}
 8236 
 8237   ins_pipe(pipe_serial);
 8238 %}
 8239 
 8240 
 8241 instruct membar_acquire_lock() %{
 8242   match(MemBarAcquireLock);
 8243   ins_cost(VOLATILE_REF_COST);
 8244 
 8245   format %{ "membar_acquire_lock (elided)" %}
 8246 
 8247   ins_encode %{
 8248     __ block_comment("membar_acquire_lock (elided)");
 8249   %}
 8250 
 8251   ins_pipe(pipe_serial);
 8252 %}
 8253 
 8254 instruct store_fence() %{
 8255   match(StoreFence);
 8256   ins_cost(VOLATILE_REF_COST);
 8257 
 8258   format %{ "store_fence" %}
 8259 
 8260   ins_encode %{
 8261     __ membar(Assembler::LoadStore|Assembler::StoreStore);
 8262   %}
 8263   ins_pipe(pipe_serial);
 8264 %}
 8265 
 8266 instruct unnecessary_membar_release() %{
 8267   predicate(unnecessary_release(n));
 8268   match(MemBarRelease);
 8269   ins_cost(0);
 8270 
 8271   format %{ "membar_release (elided)" %}
 8272 
 8273   ins_encode %{
 8274     __ block_comment("membar_release (elided)");
 8275   %}
 8276   ins_pipe(pipe_serial);
 8277 %}
 8278 
 8279 instruct membar_release() %{
 8280   match(MemBarRelease);
 8281   ins_cost(VOLATILE_REF_COST);
 8282 
 8283   format %{ "membar_release\n\t"
 8284             "dmb ish" %}
 8285 
 8286   ins_encode %{
 8287     __ block_comment("membar_release");
 8288     __ membar(Assembler::LoadStore|Assembler::StoreStore);
 8289   %}
 8290   ins_pipe(pipe_serial);
 8291 %}
 8292 
 8293 instruct membar_storestore() %{
 8294   match(MemBarStoreStore);
 8295   ins_cost(VOLATILE_REF_COST);
 8296 
 8297   format %{ "MEMBAR-store-store" %}
 8298 
 8299   ins_encode %{
 8300     __ membar(Assembler::StoreStore);
 8301   %}
 8302   ins_pipe(pipe_serial);
 8303 %}
 8304 
 8305 instruct membar_release_lock() %{
 8306   match(MemBarReleaseLock);
 8307   ins_cost(VOLATILE_REF_COST);
 8308 
 8309   format %{ "membar_release_lock (elided)" %}
 8310 
 8311   ins_encode %{
 8312     __ block_comment("membar_release_lock (elided)");
 8313   %}
 8314 
 8315   ins_pipe(pipe_serial);
 8316 %}
 8317 
 8318 instruct unnecessary_membar_volatile() %{
 8319   predicate(unnecessary_volatile(n));
 8320   match(MemBarVolatile);
 8321   ins_cost(0);
 8322 
 8323   format %{ "membar_volatile (elided)" %}
 8324 
 8325   ins_encode %{
 8326     __ block_comment("membar_volatile (elided)");
 8327   %}
 8328 
 8329   ins_pipe(pipe_serial);
 8330 %}
 8331 
 8332 instruct membar_volatile() %{
 8333   match(MemBarVolatile);
 8334   ins_cost(VOLATILE_REF_COST*100);
 8335 
 8336   format %{ "membar_volatile\n\t"
 8337              "dmb ish"%}
 8338 
 8339   ins_encode %{
 8340     __ block_comment("membar_volatile");
 8341     __ membar(Assembler::StoreLoad);
 8342   %}
 8343 
 8344   ins_pipe(pipe_serial);
 8345 %}
 8346 
 8347 // ============================================================================
 8348 // Cast/Convert Instructions
 8349 
 8350 instruct castX2P(iRegPNoSp dst, iRegL src) %{
 8351   match(Set dst (CastX2P src));
 8352 
 8353   ins_cost(INSN_COST);
 8354   format %{ "mov $dst, $src\t# long -> ptr" %}
 8355 
 8356   ins_encode %{
 8357     if ($dst$$reg != $src$$reg) {
 8358       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 8359     }
 8360   %}
 8361 
 8362   ins_pipe(ialu_reg);
 8363 %}
 8364 
 8365 instruct castP2X(iRegLNoSp dst, iRegP src) %{
 8366   match(Set dst (CastP2X src));
 8367 
 8368   ins_cost(INSN_COST);
 8369   format %{ "mov $dst, $src\t# ptr -> long" %}
 8370 
 8371   ins_encode %{
 8372     if ($dst$$reg != $src$$reg) {
 8373       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 8374     }
 8375   %}
 8376 
 8377   ins_pipe(ialu_reg);
 8378 %}
 8379 
 8380 // Convert oop into int for vectors alignment masking
 8381 instruct convP2I(iRegINoSp dst, iRegP src) %{
 8382   match(Set dst (ConvL2I (CastP2X src)));
 8383 
 8384   ins_cost(INSN_COST);
 8385   format %{ "movw $dst, $src\t# ptr -> int" %}
 8386   ins_encode %{
 8387     __ movw($dst$$Register, $src$$Register);
 8388   %}
 8389 
 8390   ins_pipe(ialu_reg);
 8391 %}
 8392 
 8393 // Convert compressed oop into int for vectors alignment masking
 8394 // in case of 32bit oops (heap < 4Gb).
 8395 instruct convN2I(iRegINoSp dst, iRegN src)
 8396 %{
 8397   predicate(CompressedOops::shift() == 0);
 8398   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
 8399 
 8400   ins_cost(INSN_COST);
 8401   format %{ "mov dst, $src\t# compressed ptr -> int" %}
 8402   ins_encode %{
 8403     __ movw($dst$$Register, $src$$Register);
 8404   %}
 8405 
 8406   ins_pipe(ialu_reg);
 8407 %}
 8408 
 8409 
// Convert oop pointer into compressed form
// The two encode rules are split by nullability: the null-allowed form
// kills the flags register, the not-null form does not declare a KILL.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// NOTE(review): cr is declared but carries no KILL effect, unlike
// encodeHeapOop above — confirm encode_heap_oop_not_null leaves flags intact.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Decode rules are likewise split: this one handles oops that may be null.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
 8463 
// n.b. AArch64 implementations of encode_klass_not_null and
// decode_klass_not_null do not modify the flags register so, unlike
// Intel, we don't kill CR as a side effect here

instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    // The single-register overload is the in-place variant used when the
    // allocator assigned src and dst to the same register.
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
 8501 
// The three cast rules below match in place (the operand is both input
// and output) and have size(0): no machine code is emitted.
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
 8532 
// Vector cast: 4 bytes -> 4 shorts.  sxtl sign-extends each byte lane of
// the low half of the source into a 16-bit lane.
instruct vcvt4Bto4S(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorCastB2X src));
  format %{ "sxtl  $dst, T8H, $src, T8B\t# convert 4B to 4S vector" %}
  ins_encode %{
    __ sxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
  %}
  ins_pipe(vdop64);
%}

// Vector cast: 4 bytes -> 4 ints, via two widening sign-extensions.
instruct vcvt4Bto4I(vecX dst, vecD src) %{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorCastB2X src));
  format %{  "sxtl  $dst, T8H, $src, T8B\n\t"
             "sxtl  $dst, T4S, $dst, T4H\t# convert 4B to 4I vector"
  %}
  ins_encode %{
    __ sxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ sxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($dst$$reg), __ T4H);
  %}
  ins_pipe(vdop128);
%}

// Vector cast: 4 bytes -> 4 floats: widen to ints, then signed
// int-to-float conversion (scvtfv).
instruct vcvt4Bto4F(vecX dst, vecD src) %{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorCastB2X src));
  format %{  "sxtl  $dst, T8H, $src, T8B\n\t"
             "sxtl  $dst, T4S, $dst, T4H\n\t"
             "scvtfv  T4S, $dst, $dst\t# convert 4B to 4F vector"
  %}
  ins_encode %{
    __ sxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ sxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($dst$$reg), __ T4H);
    __ scvtfv(__ T4S, as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector cast: 8 bytes -> 8 shorts.
instruct vcvt8Bto8S(vecX dst, vecD src) %{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorCastB2X src));
  format %{ "sxtl  $dst, T8H, $src, T8B\t# convert 8B to 8S vector" %}
  ins_encode %{
    __ sxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
  %}
  ins_pipe(vdop128);
%}
 8580 
 8581 instruct vcvt4Sto4B(vecD dst, vecD src) %{
 8582   predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
 8583   match(Set dst (VectorCastS2X src));
 8584   format %{ "xtn  $dst, T8S, $src, T8H\t# convert 4S to 4B vector" %}
 8585   ins_encode %{
 8586     __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg), __ T8H);
 8587   %}
 8588   ins_pipe(vdop64);
 8589 %}
 8590 
// Vector cast: 4 shorts -> 4 ints (widening sign-extension).
instruct vcvt4Sto4I(vecX dst, vecD src) %{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorCastS2X src));
  format %{ "sxtl  $dst, T4S, $src, T4H\t# convert 4S to 4I vector" %}
  ins_encode %{
    __ sxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src$$reg), __ T4H);
  %}
  ins_pipe(vdop128);
%}

// Vector cast: 4 shorts -> 4 floats: widen to ints, then convert.
instruct vcvt4Sto4F(vecX dst, vecD src) %{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorCastS2X src));
  format %{ "sxtl    $dst, T4S, $src, T4H\n\t"
            "scvtfv  T4S, $dst, $dst\t# convert 4S to 4F vector"
  %}
  ins_encode %{
    __ sxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src$$reg), __ T4H);
    __ scvtfv(__ T4S, as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector cast: 8 shorts -> 8 bytes (narrowing).
instruct vcvt8Sto8B(vecD dst, vecX src) %{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorCastS2X src));
  format %{ "xtn  $dst, T8B, $src, T8H\t# convert 8S to 8B vector" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg), __ T8H);
  %}
  ins_pipe(vdop128);
%}
 8623 
 8624 instruct vcvt2Ito2L(vecX dst, vecD src) %{
 8625   predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_LONG);
 8626   match(Set dst (VectorCastI2X src));
 8627   format %{ "sxtl  $dst, T2D, $src, T2S\t# convert 2I to 2L vector" %}
 8628   ins_encode %{
 8629     __ sxtl(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($src$$reg), __ T2S);
 8630   %}
 8631   ins_pipe(vdop128);
 8632 %}
 8633 
 8634 instruct vcvt2Ito2F(vecD dst, vecD src) %{
 8635   predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
 8636   match(Set dst (VectorCastI2X src));
 8637   format %{ "scvtfv  T2S, $dst, $src\t# convert 2I to 2F vector" %}
 8638   ins_encode %{
 8639     __ scvtfv(__ T2S, as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
 8640   %}
 8641   ins_pipe(vdop64);
 8642 %}
 8643 
 8644 instruct vcvt2Ito2D(vecX dst, vecD src) %{
 8645   predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
 8646   match(Set dst (VectorCastI2X src));
 8647   format %{ "sxtl    $dst, T2D, $src, T2S\n\t"
 8648             "scvtfv  T2D, $dst, $dst\t# convert 2I to 2D vector"
 8649   %}
 8650   ins_encode %{
 8651     __ sxtl(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($src$$reg), __ T2S);
 8652     __ scvtfv(__ T2D, as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg));
 8653   %}
 8654   ins_pipe(vdop128);
 8655 %}
 8656 
// Vector cast 4I -> 4B: two narrowing steps (XTN), 4S->4H then 8B view of
// the halfword lanes; only the low 4 byte lanes are meaningful.
instruct vcvt4Ito4B(vecD dst, vecX src) %{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorCastI2X src));
  format %{ "xtn  $dst, T4H, $src, T4S\n\t"
            "xtn  $dst, T8B, $dst, T8H\t# convert 4I to 4B vector"
  %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($src$$reg), __ T4S);
    __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg), __ T8H);
  %}
  ins_pipe(vdop128);
%}
 8669 
// Vector cast 4I -> 4S: single narrowing step (XTN) 4S -> 4H.
instruct vcvt4Ito4S(vecD dst, vecX src) %{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorCastI2X src));
  format %{ "xtn  $dst, T4H, $src, T4S\t# convert 4I to 4S vector" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($src$$reg), __ T4S);
  %}
  ins_pipe(vdop128);
%}
 8679 
 8680 instruct vcvt4Ito4F(vecX dst, vecX src) %{
 8681   predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
 8682   match(Set dst (VectorCastI2X src));
 8683   format %{ "scvtfv  T4S, $dst, $src\t# convert 4I to 4F vector" %}
 8684   ins_encode %{
 8685     __ scvtfv(__ T4S, as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
 8686   %}
 8687   ins_pipe(vdop64);
 8688 %}
 8689 
// Vector cast 2L -> 2I: narrow two 64-bit lanes to two 32-bit lanes (XTN).
instruct vcvt2Lto2I(vecD dst, vecX src) %{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorCastL2X src));
  format %{ "xtn  $dst, T2S, $src, T2D\t# convert 2L to 2I vector" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg), __ T2D);
  %}
  ins_pipe(vdop128);
%}
 8699 
// Vector cast 2L -> 2F: long-to-double convert (SCVTF T2D), then narrow
// double to float (FCVTN) in place on dst.
instruct vcvt2Lto2F(vecD dst, vecX src) %{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorCastL2X src));
  format %{ "scvtfv  T2D, $dst, $src\n\t"
            "fcvtn   $dst, T2S, $dst, T2D\t# convert 2L to 2F vector"
  %}
  ins_encode %{
    __ scvtfv(__ T2D, as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
    __ fcvtn(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($dst$$reg), __ T2D);
  %}
  ins_pipe(vdop128);
%}
 8712 
// Vector cast 2L -> 2D: signed long-to-double convert on two 64-bit lanes.
instruct vcvt2Lto2D(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorCastL2X src));
  format %{ "scvtfv  T2D, $dst, $src\t# convert 2L to 2D vector" %}
  ins_encode %{
    __ scvtfv(__ T2D, as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdop128);
%}
 8722 
// Vector cast 2F -> 2D: widen two float lanes to two double lanes (FCVTL).
instruct vcvt2Fto2D(vecX dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorCastF2X src));
  format %{ "fcvtl  $dst, T2D, $src, T2S\t# convert 2F to 2D vector" %}
  ins_encode %{
    __ fcvtl(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($src$$reg), __ T2S);
  %}
  ins_pipe(vdop128);
%}
 8732 
// Vector cast 2D -> 2F: narrow two double lanes to two float lanes (FCVTN).
instruct vcvt2Dto2F(vecD dst, vecX src) %{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorCastD2X src));
  format %{ "fcvtn  $dst, T2S, $src, T2D\t# convert 2D to 2F vector" %}
  ins_encode %{
    __ fcvtn(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg), __ T2D);
  %}
  ins_pipe(vdop128);
%}
 8742 
 8743 // ============================================================================
 8744 // Atomic operation instructions
 8745 //
 8746 // Intel and SPARC both implement Ideal Node LoadPLocked and
 8747 // Store{PIL}Conditional instructions using a normal load for the
 8748 // LoadPLocked and a CAS for the Store{PIL}Conditional.
 8749 //
 8750 // The ideal code appears only to use LoadPLocked/StorePLocked as a
 8751 // pair to lock object allocations from Eden space when not using
 8752 // TLABs.
 8753 //
 8754 // There does not appear to be a Load{IL}Locked Ideal Node and the
 8755 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
 8756 // and to use StoreIConditional only for 32-bit and StoreLConditional
 8757 // only for 64-bit.
 8758 //
 8759 // We implement LoadPLocked and StorePLocked instructions using,
 8760 // respectively the AArch64 hw load-exclusive and store-conditional
 8761 // instructions. Whereas we must implement each of
 8762 // Store{IL}Conditional using a CAS which employs a pair of
 8763 // instructions comprising a load-exclusive followed by a
 8764 // store-conditional.
 8765 
 8766 
 8767 // Locked-load (linked load) of the current heap-top
 8768 // used when updating the eden heap top
 8769 // implemented using ldaxr on AArch64
 8770 
// Linked (locked) pointer load via LDAXR (load-acquire exclusive); pairs
// with storePConditional below for lock-free heap-top updates (see the
// section comment above).
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
 8783 
 8784 // Conditional-store of the updated heap-top.
 8785 // Used during allocation of the shared heap.
 8786 // Sets flag (EQ) on success.
 8787 // implemented using stlxr on AArch64.
 8788 
 8789 instruct storePConditional(memory8 heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
 8790 %{
 8791   match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));
 8792 
 8793   ins_cost(VOLATILE_REF_COST);
 8794 
 8795  // TODO
 8796  // do we need to do a store-conditional release or can we just use a
 8797  // plain store-conditional?
 8798 
 8799   format %{
 8800     "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
 8801     "cmpw rscratch1, zr\t# EQ on successful write"
 8802   %}
 8803 
 8804   ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));
 8805 
 8806   ins_pipe(pipe_serial);
 8807 %}
 8808 
 8809 
 8810 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
 8811 // when attempting to rebias a lock towards the current thread.  We
 8812 // must use the acquire form of cmpxchg in order to guarantee acquire
 8813 // semantics in this case.
 8814 instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
 8815 %{
 8816   match(Set cr (StoreLConditional mem (Binary oldval newval)));
 8817 
 8818   ins_cost(VOLATILE_REF_COST);
 8819 
 8820   format %{
 8821     "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
 8822     "cmpw rscratch1, zr\t# EQ on successful write"
 8823   %}
 8824 
 8825   ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));
 8826 
 8827   ins_pipe(pipe_slow);
 8828 %}
 8829 
 8830 // storeIConditional also has acquire semantics, for no better reason
 8831 // than matching storeLConditional.  At the time of writing this
 8832 // comment storeIConditional was not used anywhere by AArch64.
 8833 instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
 8834 %{
 8835   match(Set cr (StoreIConditional mem (Binary oldval newval)));
 8836 
 8837   ins_cost(VOLATILE_REF_COST);
 8838 
 8839   format %{
 8840     "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
 8841     "cmpw rscratch1, zr\t# EQ on successful write"
 8842   %}
 8843 
 8844   ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));
 8845 
 8846   ins_pipe(pipe_slow);
 8847 %}
 8848 
 8849 // standard CompareAndSwapX when we are using barriers
 8850 // these have higher priority than the rules selected by a predicate
 8851 
 8852 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
 8853 // can't match them
 8854 
 8855 instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8856 
 8857   match(Set res (CompareAndSwapB mem (Binary oldval newval)));
 8858   ins_cost(2 * VOLATILE_REF_COST);
 8859 
 8860   effect(KILL cr);
 8861 
 8862   format %{
 8863     "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8864     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8865   %}
 8866 
 8867   ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
 8868             aarch64_enc_cset_eq(res));
 8869 
 8870   ins_pipe(pipe_slow);
 8871 %}
 8872 
 8873 instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8874 
 8875   match(Set res (CompareAndSwapS mem (Binary oldval newval)));
 8876   ins_cost(2 * VOLATILE_REF_COST);
 8877 
 8878   effect(KILL cr);
 8879 
 8880   format %{
 8881     "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8882     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8883   %}
 8884 
 8885   ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
 8886             aarch64_enc_cset_eq(res));
 8887 
 8888   ins_pipe(pipe_slow);
 8889 %}
 8890 
 8891 instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8892 
 8893   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
 8894   ins_cost(2 * VOLATILE_REF_COST);
 8895 
 8896   effect(KILL cr);
 8897 
 8898  format %{
 8899     "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8900     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8901  %}
 8902 
 8903  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
 8904             aarch64_enc_cset_eq(res));
 8905 
 8906   ins_pipe(pipe_slow);
 8907 %}
 8908 
 8909 instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
 8910 
 8911   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
 8912   ins_cost(2 * VOLATILE_REF_COST);
 8913 
 8914   effect(KILL cr);
 8915 
 8916  format %{
 8917     "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
 8918     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8919  %}
 8920 
 8921  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
 8922             aarch64_enc_cset_eq(res));
 8923 
 8924   ins_pipe(pipe_slow);
 8925 %}
 8926 
 8927 instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
 8928 
 8929   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
 8930   predicate(n->as_LoadStore()->barrier_data() == 0);
 8931   ins_cost(2 * VOLATILE_REF_COST);
 8932 
 8933   effect(KILL cr);
 8934 
 8935  format %{
 8936     "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
 8937     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8938  %}
 8939 
 8940  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
 8941             aarch64_enc_cset_eq(res));
 8942 
 8943   ins_pipe(pipe_slow);
 8944 %}
 8945 
 8946 instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
 8947 
 8948   match(Set res (CompareAndSwapN mem (Binary oldval newval)));
 8949   ins_cost(2 * VOLATILE_REF_COST);
 8950 
 8951   effect(KILL cr);
 8952 
 8953  format %{
 8954     "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
 8955     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8956  %}
 8957 
 8958  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
 8959             aarch64_enc_cset_eq(res));
 8960 
 8961   ins_pipe(pipe_slow);
 8962 %}
 8963 
 8964 // alternative CompareAndSwapX when we are eliding barriers
 8965 
 8966 instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8967 
 8968   predicate(needs_acquiring_load_exclusive(n));
 8969   match(Set res (CompareAndSwapB mem (Binary oldval newval)));
 8970   ins_cost(VOLATILE_REF_COST);
 8971 
 8972   effect(KILL cr);
 8973 
 8974   format %{
 8975     "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8976     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8977   %}
 8978 
 8979   ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
 8980             aarch64_enc_cset_eq(res));
 8981 
 8982   ins_pipe(pipe_slow);
 8983 %}
 8984 
 8985 instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 8986 
 8987   predicate(needs_acquiring_load_exclusive(n));
 8988   match(Set res (CompareAndSwapS mem (Binary oldval newval)));
 8989   ins_cost(VOLATILE_REF_COST);
 8990 
 8991   effect(KILL cr);
 8992 
 8993   format %{
 8994     "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 8995     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 8996   %}
 8997 
 8998   ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
 8999             aarch64_enc_cset_eq(res));
 9000 
 9001   ins_pipe(pipe_slow);
 9002 %}
 9003 
 9004 instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 9005 
 9006   predicate(needs_acquiring_load_exclusive(n));
 9007   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
 9008   ins_cost(VOLATILE_REF_COST);
 9009 
 9010   effect(KILL cr);
 9011 
 9012  format %{
 9013     "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
 9014     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 9015  %}
 9016 
 9017  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
 9018             aarch64_enc_cset_eq(res));
 9019 
 9020   ins_pipe(pipe_slow);
 9021 %}
 9022 
 9023 instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
 9024 
 9025   predicate(needs_acquiring_load_exclusive(n));
 9026   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
 9027   ins_cost(VOLATILE_REF_COST);
 9028 
 9029   effect(KILL cr);
 9030 
 9031  format %{
 9032     "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
 9033     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 9034  %}
 9035 
 9036  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
 9037             aarch64_enc_cset_eq(res));
 9038 
 9039   ins_pipe(pipe_slow);
 9040 %}
 9041 
 9042 instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
 9043 
 9044   predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
 9045   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
 9046   ins_cost(VOLATILE_REF_COST);
 9047 
 9048   effect(KILL cr);
 9049 
 9050  format %{
 9051     "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
 9052     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 9053  %}
 9054 
 9055  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
 9056             aarch64_enc_cset_eq(res));
 9057 
 9058   ins_pipe(pipe_slow);
 9059 %}
 9060 
 9061 instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
 9062 
 9063   predicate(needs_acquiring_load_exclusive(n));
 9064   match(Set res (CompareAndSwapN mem (Binary oldval newval)));
 9065   ins_cost(VOLATILE_REF_COST);
 9066 
 9067   effect(KILL cr);
 9068 
 9069  format %{
 9070     "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
 9071     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 9072  %}
 9073 
 9074  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
 9075             aarch64_enc_cset_eq(res));
 9076 
 9077   ins_pipe(pipe_slow);
 9078 %}
 9079 
 9080 
 9081 // ---------------------------------------------------------------------
 9082 
 9083 
 9084 // BEGIN This section of the file is automatically generated. Do not edit --------------
 9085 
 9086 // Sundry CAS operations.  Note that release is always true,
 9087 // regardless of the memory ordering of the CAS.  This is because we
 9088 // need the volatile case to be sequentially consistent but there is
 9089 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
 9090 // can't check the type of memory ordering here, so we always emit a
 9091 // STLXR.
 9092 
 9093 // This section is generated from aarch64_ad_cas.m4
 9094 
 9095 
 9096 
// m4-generated (aarch64_ad_cas.m4): strong CompareAndExchangeB -- returns
// the previous value (sign-extended byte via sxtbw) in $res.
// NOTE(review): format text says "(byte, weak)" but /*weak*/ is false here;
// the mislabel should be corrected in the m4 source, not hand-edited.
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 9112 
// m4-generated: strong CompareAndExchangeS -- previous value (sign-extended
// halfword via sxthw) in $res.
// NOTE(review): "(short, weak)" in the format mislabels this strong
// (weak=false) form; fix in aarch64_ad_cas.m4.
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 9128 
// m4-generated: strong CompareAndExchangeI -- previous word value in $res.
// NOTE(review): "(int, weak)" mislabels this strong (weak=false) form.
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 9143 
// m4-generated: strong CompareAndExchangeL -- previous xword value in $res.
// NOTE(review): "(long, weak)" mislabels this strong (weak=false) form.
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 9158 
// m4-generated: strong CompareAndExchangeN (narrow oop) -- previous value in $res.
// NOTE(review): "(narrow oop, weak)" mislabels this strong (weak=false) form.
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 9173 
// m4-generated: strong CompareAndExchangeP -- previous pointer in $res;
// barrier_data() == 0 lets GC barrier-aware rules take precedence.
// NOTE(review): "(ptr, weak)" mislabels this strong (weak=false) form.
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 9189 
// m4-generated: acquiring strong CompareAndExchangeB (acquire=true).
// NOTE(review): "(byte, weak)" mislabels this strong (weak=false) form.
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 9206 
// m4-generated: acquiring strong CompareAndExchangeS (acquire=true).
// NOTE(review): "(short, weak)" mislabels this strong (weak=false) form.
instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 9223 
 9224 
// m4-generated: acquiring strong CompareAndExchangeI (acquire=true).
// NOTE(review): "(int, weak)" mislabels this strong (weak=false) form.
instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 9240 
// m4-generated: acquiring strong CompareAndExchangeL (acquire=true).
// NOTE(review): "(long, weak)" mislabels this strong (weak=false) form.
instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 9256 
 9257 
// m4-generated: acquiring strong CompareAndExchangeN (acquire=true).
// NOTE(review): "(narrow oop, weak)" mislabels this strong (weak=false) form.
instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 9273 
// m4-generated: acquiring strong CompareAndExchangeP (acquire=true);
// barrier_data() == 0 lets GC barrier-aware rules take precedence.
// NOTE(review): "(ptr, weak)" mislabels this strong (weak=false) form.
instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 9289 
// m4-generated: WeakCompareAndSwapB -- weak CAS (may fail spuriously);
// $res <- 1 on success, 0 on failure (csetw on EQ).
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9306 
// m4-generated: WeakCompareAndSwapS -- weak CAS; boolean 0/1 result.
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9323 
// m4-generated: WeakCompareAndSwapI -- weak CAS; boolean 0/1 result.
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9340 
// m4-generated: WeakCompareAndSwapL -- weak CAS; boolean 0/1 result
// (hence iRegINoSp res, not iRegL).
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9357 
// m4-generated: WeakCompareAndSwapN (narrow oop) -- weak CAS; 0/1 result.
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9374 
// m4-generated: WeakCompareAndSwapP -- weak CAS; 0/1 result;
// barrier_data() == 0 lets GC barrier-aware rules take precedence.
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9392 
// m4-generated: acquiring WeakCompareAndSwapB (acquire=true); 0/1 result.
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9410 
// m4-generated: acquiring WeakCompareAndSwapS (acquire=true); 0/1 result.
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9428 
// m4-generated: acquiring WeakCompareAndSwapI (acquire=true); 0/1 result.
instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9446 
 9447 instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
 9448   predicate(needs_acquiring_load_exclusive(n));
 9449   match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
 9450   ins_cost(VOLATILE_REF_COST);
 9451   effect(KILL cr);
 9452   format %{
 9453     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
 9454     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 9455   %}
 9456   ins_encode %{
 9457     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 9458                Assembler::xword, /*acquire*/ true, /*release*/ true,
 9459                /*weak*/ true, noreg);
 9460     __ csetw($res$$Register, Assembler::EQ);
 9461   %}
 9462   ins_pipe(pipe_slow);
 9463 %}
 9464 
 9465 instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
 9466   predicate(needs_acquiring_load_exclusive(n));
 9467   match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
 9468   ins_cost(VOLATILE_REF_COST);
 9469   effect(KILL cr);
 9470   format %{
 9471     "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
 9472     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 9473   %}
 9474   ins_encode %{
 9475     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 9476                Assembler::word, /*acquire*/ true, /*release*/ true,
 9477                /*weak*/ true, noreg);
 9478     __ csetw($res$$Register, Assembler::EQ);
 9479   %}
 9480   ins_pipe(pipe_slow);
 9481 %}
 9482 
 9483 instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
 9484   match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
 9485   predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
 9486   ins_cost(VOLATILE_REF_COST);
 9487   effect(KILL cr);
 9488   format %{
 9489     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
 9490     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 9491   %}
 9492   ins_encode %{
 9493     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 9494                Assembler::xword, /*acquire*/ true, /*release*/ true,
 9495                /*weak*/ true, noreg);
 9496     __ csetw($res$$Register, Assembler::EQ);
 9497   %}
 9498   ins_pipe(pipe_slow);
 9499 %}
 9500 
 9501 // END This section of the file is automatically generated. Do not edit --------------
 9502 // ---------------------------------------------------------------------
 9503 
// Atomic get-and-set (exchange) rules: $prev receives the value that was
// in memory before $newv was stored.

// Exchange an int (word).
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Exchange a long (xword).
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Exchange a narrow oop (word).
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Exchange a pointer (xword).  Only matches when barrier_data() == 0,
// i.e. no GC barrier is required on this access.
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9544 
// Acquiring variants of the exchange rules, selected by the
// needs_acquiring_load_exclusive(n) predicate.  These emit the
// acquire-flavoured exchange (atomic_xchgal / atomic_xchgalw) instead of
// the plain one.

// Exchange an int with acquire semantics.
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Exchange a long with acquire semantics.
instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Exchange a narrow oop with acquire semantics.
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Exchange a pointer with acquire semantics.  Only matches when
// barrier_data() == 0, i.e. no GC barrier is required on this access.
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9588 
 9589 
// Atomic get-and-add rules: $newval receives the value that was in memory
// before $incr was added.  There are register and add/sub-immediate forms
// of the increment, plus "_no_res" forms (selected when the node's
// result_not_used()) that discard the old value (noreg destination) at a
// slightly lower cost.

// Add a register to a long; old value returned in $newval.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// As above but the old value is not used, so it is discarded.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Add an add/sub immediate to a long; old value returned in $newval.
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// As above but the old value is not used, so it is discarded.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Add a register to an int; old value returned in $newval.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// As above but the old value is not used, so it is discarded.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Add an add/sub immediate to an int; old value returned in $newval.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// As above but the old value is not used, so it is discarded.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9673 
// Acquiring variants of the get-and-add rules, selected by the
// needs_acquiring_load_exclusive(n) predicate.  They emit the
// acquire-flavoured add (atomic_addal / atomic_addalw) instead of the
// plain one; otherwise the register/immediate and "_no_res" structure
// mirrors the rules above.

// Add a register to a long with acquire semantics.
instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// As above but the old value is not used, so it is discarded.
instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Add an add/sub immediate to a long with acquire semantics.
instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// As above but the old value is not used, so it is discarded.
instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Add a register to an int with acquire semantics.
instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// As above but the old value is not used, so it is discarded.
instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Add an add/sub immediate to an int with acquire semantics.
instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// As above but the old value is not used, so it is discarded.
instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9761 
 9762 // Manifest a CmpL result in an integer register.
 9763 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
 9764 instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
 9765 %{
 9766   match(Set dst (CmpL3 src1 src2));
 9767   effect(KILL flags);
 9768 
 9769   ins_cost(INSN_COST * 6);
 9770   format %{
 9771       "cmp $src1, $src2"
 9772       "csetw $dst, ne"
 9773       "cnegw $dst, lt"
 9774   %}
 9775   // format %{ "CmpL3 $dst, $src1, $src2" %}
 9776   ins_encode %{
 9777     __ cmp($src1$$Register, $src2$$Register);
 9778     __ csetw($dst$$Register, Assembler::NE);
 9779     __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
 9780   %}
 9781 
 9782   ins_pipe(pipe_class_default);
 9783 %}
 9784 
 9785 instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
 9786 %{
 9787   match(Set dst (CmpL3 src1 src2));
 9788   effect(KILL flags);
 9789 
 9790   ins_cost(INSN_COST * 6);
 9791   format %{
 9792       "cmp $src1, $src2"
 9793       "csetw $dst, ne"
 9794       "cnegw $dst, lt"
 9795   %}
 9796   ins_encode %{
 9797     int32_t con = (int32_t)$src2$$constant;
 9798      if (con < 0) {
 9799       __ adds(zr, $src1$$Register, -con);
 9800     } else {
 9801       __ subs(zr, $src1$$Register, con);
 9802     }
 9803     __ csetw($dst$$Register, Assembler::NE);
 9804     __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
 9805   %}
 9806 
 9807   ins_pipe(pipe_class_default);
 9808 %}
 9809 
 9810 // ============================================================================
 9811 // Conditional Move Instructions
 9812 
// n.b. we have identical rules for both a signed compare op (cmpOp)
// and an unsigned compare op (cmpOpU). it would be nice if we could
// define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
 9822 
// Conditional move of an int, signed compare.  Note the operand order:
// cselw selects $src2 when the condition holds, otherwise $src1 (the
// first csel source is the value taken on a true condition).
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Conditional move of an int, unsigned compare (cmpOpU / rFlagsRegU).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9854 
 9855 // special cases where one arg is zero
 9856 
 9857 // n.b. this is selected in preference to the rule above because it
 9858 // avoids loading constant 0 into a source register
 9859 
 9860 // TODO
 9861 // we ought only to be able to cull one of these variants as the ideal
 9862 // transforms ought always to order the zero consistently (to left/right?)
 9863 
// Int conditional moves where one arm is the constant zero; the zero arm
// is encoded as the zero register, avoiding a constant load.

// Zero in the first (condition-true) position, signed compare.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero in the first position, unsigned compare.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero in the second position, signed compare.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero in the second position, unsigned compare.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9927 
 9928 // special case for creating a boolean 0 or 1
 9929 
 9930 // n.b. this is selected in preference to the rule above because it
 9931 // avoids loading constants 0 and 1 into a source register
 9932 
// Boolean select between the constants 1 and 0 via a single
// csincw dst, zr, zr, cond — equivalent to cset with the negated
// condition — so neither constant needs a register.

// Signed compare variant.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// Unsigned compare variant.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
 9970 
// Conditional move of a long, signed compare; csel selects $src2 when the
// condition holds, otherwise $src1.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Conditional move of a long, unsigned compare.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10002 
10003 // special cases where one arg is zero
10004 
// Long conditional moves where one arm is the constant zero; the zero arm
// uses the zero register, avoiding a constant load.

// Zero in the second position, signed compare.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero in the second position, unsigned compare.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero in the first position, signed compare.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero in the first position, unsigned compare.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10068 
// Conditional move of a pointer, signed compare; csel selects $src2 when
// the condition holds, otherwise $src1.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Conditional move of a pointer, unsigned compare.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10100 
10101 // special cases where one arg is zero
10102 
// Pointer conditional moves where one arm is the null constant; the null
// arm uses the zero register, avoiding a constant load.

// Null in the second position, signed compare.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Null in the second position, unsigned compare.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Null in the first position, signed compare.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Null in the first position, unsigned compare.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10166 
// Conditional move of a narrow oop, signed compare; cselw selects $src2
// when the condition holds, otherwise $src1.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10182 
// Conditional move of a narrow oop, unsigned compare (cmpOpU /
// rFlagsRegU); cselw selects $src2 when the condition holds, otherwise
// $src1.  The format comment previously said "signed"; fixed to match
// the other unsigned cmov rules.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10198 
10199 // special cases where one arg is zero
10200 
// Narrow-oop conditional moves where one arm is the null constant; the
// null arm uses the zero register, avoiding a constant load.

// Null in the second position, signed compare.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Null in the second position, unsigned compare.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Null in the first position, signed compare.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Null in the first position, unsigned compare.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10264 
// Conditional move of a float via fcsels; as with the integer csel rules,
// $src2 is selected when the condition holds, otherwise $src1.

// Signed compare variant.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}

// Unsigned compare variant.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10300 
// Conditional move of a double via fcseld; $src2 is selected when the
// condition holds, otherwise $src1.  The format comment previously said
// "cmove float" (stale copy-paste from the cmovF rule); fixed to "double".
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10318 
// Conditional select of double values on an unsigned flag comparison.
// n.b. sources are swapped in the encoding ($src2 first) because fcsel
// returns its first source when the condition holds.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // This is the CMoveD (double) rule; the format comment previously said
  // "float", copy-pasted from cmovUF_reg.
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10336 
10337 // ============================================================================
10338 // Arithmetic Instructions
10339 //
10340 
10341 // Integer Addition
10342 
10343 // TODO
10344 // these currently employ operations which do not set CR and hence are
10345 // not flagged as killing CR but we would like to isolate the cases
10346 // where we want to set flags from those where we don't. need to work
10347 // out how to do that.
10348 
// Integer add, register + register: dst = src1 + src2 (32-bit addw).
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10363 
// Integer add, register + immediate: dst = src1 + src2 (32-bit).
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10377 
// Integer add of a narrowed long plus an immediate: the ConvL2I is free
// because the 32-bit addw reads only the low word of $src1.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10391 
10392 // Pointer Addition
// Pointer add, register + long offset register: dst = src1 + src2.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10407 
// Pointer add with a fused int->long sign extension: the ConvI2L is
// folded into the add's sxtw extend, saving a separate instruction.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
10422 
// Pointer add with a fused shifted index: dst = src1 + (src2 << scale),
// emitted as a single lea with an lsl-scaled register address.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10437 
// Pointer add with a fused sign-extended, shifted int index:
// dst = src1 + (sxtw(src2) << scale), via lea with sxtw addressing.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10452 
// Fused ConvI2L + left shift via sbfiz: sign-extends $src and shifts it
// into position in one instruction; the source field width is capped at
// 32 bits (MIN) since only the low word of an int is significant.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
10467 
10468 // Pointer Immediate Addition
10469 // n.b. this needs to be more expensive than using an indirect memory
10470 // operand
// Pointer add, register + immediate: dst = src1 + src2.
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10484 
10485 // Long Addition
// Long add, register + register: dst = src1 + src2 (64-bit add).
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10501 
// Long Immediate Addition. No constant pool entries are required.
// Long add, register + immediate: dst = src1 + src2 (64-bit).
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10516 
10517 // Integer Subtraction
// Integer subtract, register - register: dst = src1 - src2 (32-bit subw).
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10532 
10533 // Immediate Subtraction
// Integer subtract, register - immediate: dst = src1 - src2 (32-bit).
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10547 
10548 // Long Subtraction
// Long subtract, register - register: dst = src1 - src2 (64-bit sub).
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10564 
// Long Immediate Subtraction. No constant pool entries are required.
// Long subtract, register - immediate: dst = src1 - src2 (64-bit).
// (format string previously read "sub$dst", missing the space after the
// mnemonic that every sibling rule has)
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10579 
10580 // Integer Negation (special case for sub)
10581 
// Integer negate (SubI of zero): dst = -src (32-bit negw).
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10595 
10596 // Long Negation
10597 
// Long negate (SubL of zero): dst = -src (64-bit neg).
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10611 
10612 // Integer Multiply
10613 
// Integer multiply: dst = src1 * src2 (32-bit mulw).
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10628 
// Signed widening multiply: 32x32 -> 64 via smull, matching a long
// multiply of two int->long conversions in one instruction.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10643 
10644 // Long Multiply
10645 
// Long multiply: dst = src1 * src2 (low 64 bits of the product).
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10660 
// High 64 bits of the signed 128-bit product of two longs (smulh).
// (format string previously had a stray trailing comma after $src2)
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10676 
10677 // Combined Integer Multiply & Add/Sub
10678 
// Fused integer multiply-add: dst = src3 + src1 * src2 (maddw).
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10694 
// Fused integer multiply-subtract: dst = src3 - src1 * src2 (msubw).
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10710 
10711 // Combined Integer Multiply & Neg
10712 
// Fused integer multiply-negate: dst = -(src1 * src2) (mnegw).
// Two match rules cover both commutative placements of the negation.
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));
  match(Set dst (MulI src1 (SubI zero src2)));

  ins_cost(INSN_COST * 3);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10728 
10729 // Combined Long Multiply & Add/Sub
10730 
// Fused long multiply-add: dst = src3 + src1 * src2 (madd).
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10746 
// Fused long multiply-subtract: dst = src3 - src1 * src2 (msub).
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10762 
10763 // Combined Long Multiply & Neg
10764 
// Fused long multiply-negate: dst = -(src1 * src2) (mneg).
// Two match rules cover both commutative placements of the negation.
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));
  match(Set dst (MulL src1 (SubL zero src2)));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10780 
10781 // Combine Integer Signed Multiply & Add/Sub/Neg Long
10782 
// Fused signed widening multiply-add: dst = src3 + sxt(src1) * sxt(src2),
// 32x32+64 -> 64 in a single smaddl.
instruct smaddL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (AddL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smaddl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smaddl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10798 
// Fused signed widening multiply-subtract:
// dst = src3 - sxt(src1) * sxt(src2) in a single smsubl.
instruct smsubL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (SubL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smsubl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smsubl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10814 
// Fused signed widening multiply-negate: dst = -(sxt(src1) * sxt(src2))
// via smnegl; both commutative forms of the negation are matched.
instruct smnegL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero (ConvI2L src1)) (ConvI2L src2)));
  match(Set dst (MulL (ConvI2L src1) (SubL zero (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smnegl  $dst, $src1, $src2" %}

  ins_encode %{
    __ smnegl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10830 
// Combined Multiply-Add of Shorts into Integer (dst = src1 * src2 + src3 * src4)
10832 
// Two-instruction sequence: mulw into rscratch1, then maddw folds the
// second product and the partial sum into $dst.
instruct muladdS2I(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3, iRegIorL2I src4) %{
  match(Set dst (MulAddS2I (Binary src1 src2) (Binary src3 src4)));

  ins_cost(INSN_COST * 5);
  format %{ "mulw  rscratch1, $src1, $src2\n\t"
            "maddw $dst, $src3, $src4, rscratch1" %}

  ins_encode %{
    __ mulw(rscratch1, as_Register($src1$$reg), as_Register($src2$$reg));
    __ maddw(as_Register($dst$$reg), as_Register($src3$$reg), as_Register($src4$$reg), rscratch1); %}

  ins_pipe(imac_reg_reg);
%}
10846 
10847 // Integer Divide
10848 
// Integer divide: dst = src1 / src2 (32-bit sdivw via shared encoding).
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10858 
// Sign-bit extraction: (src1 >> 31) >>> 31 is just src1 >>> 31, so a
// single lsrw suffices; produced by divide strength-reduction.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
10868 
// Fused round-toward-zero adjustment: dst = src + (src >>> 31), done as
// a single addw with an LSR-shifted second operand; part of the
// strength-reduced signed divide-by-2 sequence.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
10882 
10883 // Long Divide
10884 
// Long divide: dst = src1 / src2 (64-bit sdiv via shared encoding).
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10894 
// Long sign-bit extraction: (src1 >> 63) >>> 63 is just src1 >>> 63,
// so a single lsr suffices; produced by divide strength-reduction.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
10904 
// Fused round-toward-zero adjustment (long): dst = src + (src >>> 63)
// as one add with an LSR-shifted operand; part of the strength-reduced
// signed divide-by-2 sequence.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immI_63 div1, immI_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "add $dst, $src, $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
10918 
10919 // Integer Remainder
10920 
// Integer remainder: dst = src1 % src2, computed as
// src1 - (src1 / src2) * src2 by an sdivw followed by msubw.
// (format string previously read "msubw($dst, ..." with a stray paren)
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10931 
10932 // Long Remainder
10933 
// Long remainder: dst = src1 % src2, computed as
// src1 - (src1 / src2) * src2 by an sdiv followed by msub.
// (format string previously had a stray paren and was missing the tab
// after the line break, unlike modI)
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10944 
10945 // Integer Shifts
10946 
10947 // Shift Left Register
// Integer shift left by a register amount: dst = src1 << src2 (lslvw).
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10962 
10963 // Shift Left Immediate
// Integer shift left by an immediate; the count is masked to 0-31,
// matching Java's int shift semantics.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10978 
10979 // Shift Right Logical Register
// Integer logical shift right by a register amount: dst = src1 >>> src2.
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10994 
10995 // Shift Right Logical Immediate
// Integer logical shift right by an immediate; count masked to 0-31.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11010 
11011 // Shift Right Arithmetic Register
// Integer arithmetic shift right by a register amount: dst = src1 >> src2.
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11026 
11027 // Shift Right Arithmetic Immediate
// Integer arithmetic shift right by an immediate; count masked to 0-31.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11042 
11043 // Combined Int Mask and Right Shift (using UBFM)
11044 // TODO
11045 
11046 // Long Shifts
11047 
11048 // Shift Left Register
// Long shift left by a register amount: dst = src1 << src2 (lslv).
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11063 
11064 // Shift Left Immediate
// Long shift left by an immediate; the count is masked to 0-63,
// matching Java's long shift semantics.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11079 
11080 // Shift Right Logical Register
// Long logical shift right by a register amount: dst = src1 >>> src2.
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11095 
11096 // Shift Right Logical Immediate
// Long logical shift right by an immediate; count masked to 0-63.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11111 
11112 // A special-case pattern for card table stores.
// Logical shift right of a pointer reinterpreted as a long (CastP2X);
// the cast is a no-op at machine level, so a plain lsr suffices.
// Used for card table stores.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11127 
11128 // Shift Right Arithmetic Register
// Long arithmetic shift right by a register amount: dst = src1 >> src2.
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11143 
11144 // Shift Right Arithmetic Immediate
// Long arithmetic shift right by an immediate; count masked to 0-63.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11159 
11160 // BEGIN This section of the file is automatically generated. Do not edit --------------
11161 
// Long bitwise NOT: dst = ~src1, via eon against zr.
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
// Integer bitwise NOT: dst = ~src1, via eonw against zr.
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
11194 
// Fused integer and-not: dst = src1 & ~src2, one bicw instruction.
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11211 
// Fused long and-not: dst = src1 & ~src2, one bic instruction.
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11228 
// Fused integer or-not: dst = src1 | ~src2, one ornw instruction.
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11245 
// Fused long or-not: dst = src1 | ~src2, one orn instruction.
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11262 
// Fused integer xor-not: dst = ~(src1 ^ src2), one eonw instruction.
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11279 
// Fused long xor-not: dst = ~(src1 ^ src2), one eon instruction.
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11296 
// Fused integer and-not with a logical right shift of the complemented
// operand: dst = src1 & ~(src2 >>> src3), one bicw with LSR.
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11314 
// Fused long and-not with a logical right shift of the complemented
// operand: dst = src1 & ~(src2 >>> src3), one bic with LSR.
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11332 
11333 instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
11334                          iRegIorL2I src1, iRegIorL2I src2,
11335                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11336   match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
11337   ins_cost(1.9 * INSN_COST);
11338   format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}
11339 
11340   ins_encode %{
11341     __ bicw(as_Register($dst$$reg),
11342               as_Register($src1$$reg),
11343               as_Register($src2$$reg),
11344               Assembler::ASR,
11345               $src3$$constant & 0x1f);
11346   %}
11347 
11348   ins_pipe(ialu_reg_reg_shift);
11349 %}
11350 
11351 instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
11352                          iRegL src1, iRegL src2,
11353                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11354   match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
11355   ins_cost(1.9 * INSN_COST);
11356   format %{ "bic  $dst, $src1, $src2, ASR $src3" %}
11357 
11358   ins_encode %{
11359     __ bic(as_Register($dst$$reg),
11360               as_Register($src1$$reg),
11361               as_Register($src2$$reg),
11362               Assembler::ASR,
11363               $src3$$constant & 0x3f);
11364   %}
11365 
11366   ins_pipe(ialu_reg_reg_shift);
11367 %}
11368 
11369 instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
11370                          iRegIorL2I src1, iRegIorL2I src2,
11371                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11372   match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
11373   ins_cost(1.9 * INSN_COST);
11374   format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}
11375 
11376   ins_encode %{
11377     __ bicw(as_Register($dst$$reg),
11378               as_Register($src1$$reg),
11379               as_Register($src2$$reg),
11380               Assembler::LSL,
11381               $src3$$constant & 0x1f);
11382   %}
11383 
11384   ins_pipe(ialu_reg_reg_shift);
11385 %}
11386 
11387 instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
11388                          iRegL src1, iRegL src2,
11389                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11390   match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
11391   ins_cost(1.9 * INSN_COST);
11392   format %{ "bic  $dst, $src1, $src2, LSL $src3" %}
11393 
11394   ins_encode %{
11395     __ bic(as_Register($dst$$reg),
11396               as_Register($src1$$reg),
11397               as_Register($src2$$reg),
11398               Assembler::LSL,
11399               $src3$$constant & 0x3f);
11400   %}
11401 
11402   ins_pipe(ialu_reg_reg_shift);
11403 %}
11404 
// Family: dst = ~(src1 ^ (src2 <shift> src3)), matched as
// (Xor -1 ((shift src2 src3) XOR src1)). XOR with the all-ones immediate src4
// negates the inner XOR, so each pattern folds into one EON(W) with a
// shifted-register operand. Shift amounts are masked to the operand width.
// NOTE(review): `cr` is declared but never referenced by the encodings —
// presumably retained for consistency; confirm whether it is required.

// 32-bit: dst = ~(src1 ^ (src2 >>> src3))
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = ~(src1 ^ (src2 >>> src3))
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = ~(src1 ^ (src2 >> src3))  (arithmetic shift)
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = ~(src1 ^ (src2 >> src3))  (arithmetic shift)
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = ~(src1 ^ (src2 << src3))
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = ~(src1 ^ (src2 << src3))
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11512 
// Family: dst = src1 | ~(src2 <shift> src3), matched as
// (Or src1 ((shift src2 src3) XOR -1)). XOR with the all-ones immediate src4
// is bitwise NOT, so each pattern folds into one ORN(W) with a shifted-register
// operand. Shift amounts are masked to the operand width.
// NOTE(review): `cr` is declared but never referenced by the encodings —
// presumably retained for consistency; confirm whether it is required.

// 32-bit: dst = src1 | ~(src2 >>> src3)
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 | ~(src2 >>> src3)
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 | ~(src2 >> src3)  (arithmetic shift)
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 | ~(src2 >> src3)  (arithmetic shift)
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 | ~(src2 << src3)
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 | ~(src2 << src3)
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11620 
// Family: dst = src1 & (src2 <shift> src3), folded into a single AND with a
// shifted-register operand (AArch64 logical ops shift the second operand for
// free). Shift amounts are masked to the operand width (& 0x1f / & 0x3f).
// NOTE(review): `cr` is declared but never referenced by the encodings —
// presumably retained for consistency; confirm whether it is required.

// 32-bit: dst = src1 & (src2 >>> src3)
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 & (src2 >>> src3)
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 & (src2 >> src3)  (arithmetic shift)
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 & (src2 >> src3)  (arithmetic shift)
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 & (src2 << src3)
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 & (src2 << src3)
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11734 
// Family: dst = src1 ^ (src2 <shift> src3), folded into a single EOR with a
// shifted-register operand. Shift amounts are masked to the operand width.
// NOTE(review): `cr` is declared but never referenced by the encodings —
// presumably retained for consistency; confirm whether it is required.

// 32-bit: dst = src1 ^ (src2 >>> src3)
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 ^ (src2 >>> src3)
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 ^ (src2 >> src3)  (arithmetic shift)
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 ^ (src2 >> src3)  (arithmetic shift)
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 ^ (src2 << src3)
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 ^ (src2 << src3)
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11848 
// Family: dst = src1 | (src2 <shift> src3), folded into a single ORR with a
// shifted-register operand. Shift amounts are masked to the operand width.
// NOTE(review): `cr` is declared but never referenced by the encodings —
// presumably retained for consistency; confirm whether it is required.

// 32-bit: dst = src1 | (src2 >>> src3)
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 | (src2 >>> src3)
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 | (src2 >> src3)  (arithmetic shift)
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 | (src2 >> src3)  (arithmetic shift)
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 | (src2 << src3)
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 | (src2 << src3)
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11962 
// Family: dst = src1 + (src2 <shift> src3), folded into a single ADD with a
// shifted-register operand. Shift amounts are masked to the operand width.
// NOTE(review): `cr` is declared but never referenced by the encodings —
// presumably retained for consistency; confirm whether it is required.

// 32-bit: dst = src1 + (src2 >>> src3)
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 + (src2 >>> src3)
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 + (src2 >> src3)  (arithmetic shift)
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 + (src2 >> src3)  (arithmetic shift)
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 + (src2 << src3)
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 + (src2 << src3)
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12076 
// Family: dst = src1 - (src2 <shift> src3), folded into a single SUB with a
// shifted-register operand. Shift amounts are masked to the operand width.
// NOTE(review): `cr` is declared but never referenced by the encodings —
// presumably retained for consistency; confirm whether it is required.

// 32-bit: dst = src1 - (src2 >>> src3)
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 - (src2 >>> src3)
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 - (src2 >> src3)  (arithmetic shift)
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 - (src2 >> src3)  (arithmetic shift)
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: dst = src1 - (src2 << src3)
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 - (src2 << src3)
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12190 
12191 
12192 
12193 // Shift Left followed by Shift Right.
12194 // This idiom is used by the compiler for the i2b bytecode etc.
// Collapse (src << lshift) >> rshift (arithmetic) into one SBFM.
// SBFM's immr/imms pair: r = (rshift - lshift) & 63 rotates the field into
// place, s = 63 - lshift marks the most significant bit taken from src;
// bits above s are sign-extended.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    // Shift counts are masked to 0..63 (JVM semantics for 64-bit shifts).
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12212 
12213 // Shift Left followed by Shift Right.
12214 // This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of sbfmL: (src << lshift) >> rshift (arithmetic) becomes one
// SBFMW. r = (rshift - lshift) & 31 positions the field, s = 31 - lshift is
// the top source bit; higher bits are sign-extended.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    // Shift counts are masked to 0..31 (JVM semantics for 32-bit shifts).
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12232 
12233 // Shift Left followed by Shift Right.
12234 // This idiom is used by the compiler for the i2b bytecode etc.
// Collapse (src << lshift) >>> rshift (logical) into one UBFM.
// Same immr/imms computation as sbfmL, but bits above s are zeroed rather
// than sign-extended.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    // Shift counts are masked to 0..63 (JVM semantics for 64-bit shifts).
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12252 
// Shift Left followed by Shift Right (unsigned, 32-bit variant).
// (src << lshift) >>> rshift collapses to one ubfmw.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    // Shift counts are taken mod 32, matching AArch64 32-bit shift semantics.
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;               // imms: index of the highest source bit kept
    int r = (rshift - lshift) & 31;    // immr: right-rotate amount
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
// Bitfield extract with shift & mask

// (src >>> rshift) & mask, where mask = 2^width - 1 (guaranteed by
// immI_bitmask), is a single ubfxw(src, rshift, width).
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);   // mask+1 is a power of two per immI_bitmask
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit bitfield extract: (src >>> rshift) & mask, with mask = 2^width - 1
// (guaranteed by immL_bitmask), is a single ubfx(src, rshift, width).
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));
  // Make sure we are not going to exceed what ubfx can do.
  predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 63;
    long mask = $mask$$constant;
    int width = exact_log2_long(mask+1);   // mask+1 is a power of two per immL_bitmask
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12308 
// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// Note the 64-bit ubfx is emitted here: writing the X register clears the
// upper 32 bits, so the ConvI2L zero-extension comes for free (the masked
// value is non-negative because immI_bitmask is positive).
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);   // mask+1 is a power of two per immI_bitmask
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12328 
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// (src & mask) << lshift, with mask = 2^width - 1, is a single
// ubfizw(src, lshift, width) (unsigned bitfield insert-in-zero).
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);   // mask+1 is a power of two per immI_bitmask
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// 64-bit variant: (src & mask) << lshift is a single ubfiz(src, lshift, width).
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    long mask = $mask$$constant;
    int width = exact_log2_long(mask+1);   // mask+1 is a power of two per immL_bitmask
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12365 
// If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
// The mask keeps the value non-negative, so the sign/zero extension of
// ConvI2L is subsumed by the 64-bit ubfiz.
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);   // mask+1 is a power of two per immI_bitmask
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12383 
// Rotations

// OrL of complementary LShiftL/URShiftL (shift amounts sum to 0 mod 64, so
// the two shifted values cannot overlap) is a single extract-register extr.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12400 
// 32-bit variant of extrOrL: OrI of complementary LShiftI/URShiftI
// (shift amounts sum to 0 mod 32) is a single extrw.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  // Fix: the encoder emits the 32-bit extrw, so print that mnemonic
  // (previously the format string said "extr", mislabelling the
  // instruction in PrintOptoAssembly output).
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12415 
// Same pattern as extrOrL but combined with AddL: with non-overlapping
// complementary shifts, Add and Or produce identical results, so the
// single extr applies here too.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12430 
// 32-bit variant of extrAddL: AddI of complementary, non-overlapping
// shifted values is equivalent to Or and matches a single extrw.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  // Fix: the encoder emits the 32-bit extrw, so print that mnemonic
  // (previously the format string said "extr", mislabelling the
  // instruction in PrintOptoAssembly output).
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12445 
12446 
12447 // rol expander
12448 
12449 instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
12450 %{
12451   effect(DEF dst, USE src, USE shift);
12452 
12453   format %{ "rol    $dst, $src, $shift" %}
12454   ins_cost(INSN_COST * 3);
12455   ins_encode %{
12456     __ subw(rscratch1, zr, as_Register($shift$$reg));
12457     __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
12458             rscratch1);
12459     %}
12460   ins_pipe(ialu_reg_reg_vshift);
12461 %}
12462 
// rol expander
//
// 32-bit variant: rolw(x, n) synthesised as rorw(x, -n); clobbers rscratch1.
instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));   // rscratch1 = -shift
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12478 
// Rotate-left idiom: (src << shift) | (src >>> (64 - shift)).
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}
12487 
// Rotate-left idiom with 0 instead of 64: (src << shift) | (src >>> (0 - shift));
// equivalent because shift counts are taken mod 64.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}
12496 
// 32-bit rotate-left idiom: (src << shift) | (src >>> (32 - shift)).
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12505 
// 32-bit rotate-left idiom with 0 instead of 32; equivalent because shift
// counts are taken mod 32.
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12514 
// ror expander
//
// Match-less expander used by the rorL_rReg_Var_* rules below; maps
// directly onto the rorv instruction (no scratch register needed).
instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12529 
// ror expander
//
// 32-bit variant: maps directly onto rorvw.
instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12544 
// Rotate-right idiom: (src >>> shift) | (src << (64 - shift)).
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}
12553 
// Rotate-right idiom with 0 instead of 64; equivalent because shift counts
// are taken mod 64.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}
12562 
// 32-bit rotate-right idiom: (src >>> shift) | (src << (32 - shift)).
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12571 
// 32-bit rotate-right idiom with 0 instead of 32; equivalent because shift
// counts are taken mod 32.
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12580 
// Add/subtract (extended)
//
// Fold the sign-extension of a ConvI2L operand into the add/sub itself
// using the sxtw extended-register form.

// dst = src1 + sxtw(src2)
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

// dst = src1 - sxtw(src2)
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
12608 
12609 
// A (src << K) >> K shift pair sign-extends (RShift) or zero-extends
// (URShift) the low 32-K bits; fold it into the add as an extended
// register operand.

// dst = src1 + sxth(src2): K = 16, arithmetic right shift.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + sxtb(src2): K = 24, arithmetic right shift.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + uxtb(src2): K = 24, logical right shift.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12648 
// 64-bit versions of the shift-pair extension idiom: (src2 << K) >> K
// with K = 48/32/56 is a sxth/sxtw/sxtb (RShift) or uxtb (URShift)
// extension, folded into the add.

// dst = src1 + sxth(src2): K = 48.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + sxtw(src2): K = 32.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + sxtb(src2): K = 56.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + uxtb(src2): K = 56, logical right shift.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12700 
12701 
// An And with mask 0xFF / 0xFFFF / 0xFFFFFFFF is a zero-extension of the
// low byte/halfword/word; fold it into the add as uxtb/uxth/uxtw.

// dst = src1 + uxtb(src2)  (32-bit)
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + uxth(src2)  (32-bit)
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + uxtb(src2)  (64-bit)
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + uxth(src2)  (64-bit)
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + uxtw(src2)  (64-bit)
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12766 
// Subtract counterparts of the And-mask zero-extension idiom above:
// mask 0xFF / 0xFFFF / 0xFFFFFFFF folds into sub as uxtb/uxth/uxtw.

// dst = src1 - uxtb(src2)  (32-bit)
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 - uxth(src2)  (32-bit)
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 - uxtb(src2)  (64-bit)
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 - uxth(src2)  (64-bit)
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 - uxtw(src2)  (64-bit)
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12831 
12832 
// Shift-pair extension followed by an extra left shift (immIExt): fold the
// whole thing into add/sub's extended-register-with-shift form, e.g.
// dst = src1 +/- (sxtb(src2) << lshift2).
// NOTE(review): these format strings print the literal text "lshift2"
// (no '$' prefix), so the shift amount is not shown; consider "#$lshift2".

instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12910 
// 32-bit versions of the extend-then-shift idiom:
// dst = src1 +/- (sxtb/sxth(src2) << lshift2).
// NOTE(review): the format strings print the literal text "lshift2"
// (no '$' prefix); consider "#$lshift2".

instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12962 
12963 
// dst = src1 +/- (sxtw(src2) << lshift): ConvI2L followed by a left shift
// folds into the sxtw-with-shift extended operand.
// NOTE(review): the format strings print the literal text "lshift"
// (no '$' prefix); consider "#$lshift".

instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};

instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};
12989 
12990 
// 64-bit And-mask zero-extension followed by a left shift:
// dst = src1 +/- (uxtb/uxth/uxtw(src2) << lshift), folded into the
// extended-register-with-shift form of add/sub.
// NOTE(review): the format strings print the literal text "lshift"
// (no '$' prefix); consider "#$lshift".

instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13068 
// 32-bit add with a zero-extended-byte, shifted second operand:
//   dst = src1 + ((src2 & 0xff) << lshift)
// Folds the AndI+LShiftI subtree into one ADDW with an extended-register
// operand (uxtb + lshift).
// NOTE(review): cr is declared but not referenced by match/effect — confirm.
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13081 
// 32-bit add with a zero-extended-halfword, shifted second operand:
//   dst = src1 + ((src2 & 0xffff) << lshift)
// Folds the AndI+LShiftI subtree into one ADDW with an extended-register
// operand (uxth + lshift).
// NOTE(review): cr is declared but not referenced by match/effect — confirm.
instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13094 
// 32-bit subtract with a zero-extended-byte, shifted second operand:
//   dst = src1 - ((src2 & 0xff) << lshift)
// Folds the AndI+LShiftI subtree into one SUBW with an extended-register
// operand (uxtb + lshift).
// NOTE(review): cr is declared but not referenced by match/effect — confirm.
instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13107 
// 32-bit subtract with a zero-extended-halfword, shifted second operand:
//   dst = src1 - ((src2 & 0xffff) << lshift)
// Folds the AndI+LShiftI subtree into one SUBW with an extended-register
// operand (uxth + lshift).
// NOTE(review): cr is declared but not referenced by match/effect — confirm.
instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13120 
// AND-reduce the 8 byte lanes of a 64-bit vector and fold in scalar src1:
//   dst = sxtb(src1 & b[0] & ... & b[7])
// Both 32-bit halves of src2 are moved to GPRs (umov S lanes) and ANDed;
// shifted ANDs then fold the 16-bit and 8-bit halves, src1 is folded in,
// and sxtb sign-extends the surviving byte to an int.
// dst is written before all sources are consumed, hence effect TEMP_DEF dst.
instruct reduce_and8B(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (AndReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, S, 0\n\t"
            "umov   $dst, $src2, S, 1\n\t"
            "andw   $dst, $dst, $tmp\n\t"
            "andw   $dst, $dst, $dst, LSR #16\n\t"
            "andw   $dst, $dst, $dst, LSR #8\n\t"
            "andw   $dst, $src1, $dst\n\t"
            "sxtb   $dst, $dst\t and reduction8B"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ andw($dst$$Register, $dst$$Register, $tmp$$Register);
    __ andw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ andw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 8);
    __ andw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13146 
// OR-reduce the 8 byte lanes of a 64-bit vector and fold in scalar src1:
//   dst = sxtb(src1 | b[0] | ... | b[7])
// Same folding strategy as reduce_and8B, with ORs instead of ANDs.
instruct reduce_orr8B(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (OrReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, S, 0\n\t"
            "umov   $dst, $src2, S, 1\n\t"
            "orrw   $dst, $dst, $tmp\n\t"
            "orrw   $dst, $dst, $dst, LSR #16\n\t"
            "orrw   $dst, $dst, $dst, LSR #8\n\t"
            "orrw   $dst, $src1, $dst\n\t"
            "sxtb   $dst, $dst\t orr reduction8B"
  %}
  ins_encode %{
    // Combine the two 32-bit halves, then fold 16- and 8-bit halves.
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ orrw($dst$$Register, $dst$$Register, $tmp$$Register);
    __ orrw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ orrw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 8);
    __ orrw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13172 
// XOR-reduce the 8 byte lanes of a 64-bit vector and fold in scalar src1:
//   dst = sxtb(src1 ^ b[0] ^ ... ^ b[7])
// Same folding strategy as reduce_and8B, with XORs instead of ANDs.
instruct reduce_eor8B(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (XorReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, S, 0\n\t"
            "umov   $dst, $src2, S, 1\n\t"
            "eorw   $dst, $dst, $tmp\n\t"
            "eorw   $dst, $dst, $dst, LSR #16\n\t"
            "eorw   $dst, $dst, $dst, LSR #8\n\t"
            "eorw   $dst, $src1, $dst\n\t"
            "sxtb   $dst, $dst\t eor reduction8B"
  %}
  ins_encode %{
    // Combine the two 32-bit halves, then fold 16- and 8-bit halves.
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ eorw($dst$$Register, $dst$$Register, $tmp$$Register);
    __ eorw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ eorw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 8);
    __ eorw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13198 
// AND-reduce the 16 byte lanes of a 128-bit vector and fold in scalar src1:
//   dst = sxtb(src1 & b[0] & ... & b[15])
// Both 64-bit halves of src2 are moved to GPRs (umov D lanes) and combined
// with 64-bit ANDs (andr); the 32-, 16- and 8-bit halves are then folded
// with shifted ANDs before sxtb sign-extends the surviving byte.
// NOTE(review): "andr" in the format is the MacroAssembler method name for
// the 64-bit AND, not an architectural mnemonic.
instruct reduce_and16B(iRegINoSp dst, iRegIorL2I src1, vecX src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (AndReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, D, 0\n\t"
            "umov   $dst, $src2, D, 1\n\t"
            "andr   $dst, $dst, $tmp\n\t"
            "andr   $dst, $dst, $dst, LSR #32\n\t"
            "andw   $dst, $dst, $dst, LSR #16\n\t"
            "andw   $dst, $dst, $dst, LSR #8\n\t"
            "andw   $dst, $src1, $dst\n\t"
            "sxtb   $dst, $dst\t and reduction16B"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ andr($dst$$Register, $dst$$Register, $tmp$$Register);
    __ andr($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ andw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ andw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 8);
    __ andw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13226 
// OR-reduce the 16 byte lanes of a 128-bit vector and fold in scalar src1:
//   dst = sxtb(src1 | b[0] | ... | b[15])
// Same halving strategy as reduce_and16B: 64-bit ORs for the D halves, then
// shifted 32-bit ORs for the 32/16/8-bit folds, sxtb for the final extend.
instruct reduce_orr16B(iRegINoSp dst, iRegIorL2I src1, vecX src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (OrReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, D, 0\n\t"
            "umov   $dst, $src2, D, 1\n\t"
            "orr    $dst, $dst, $tmp\n\t"
            "orr    $dst, $dst, $dst, LSR #32\n\t"
            "orrw   $dst, $dst, $dst, LSR #16\n\t"
            "orrw   $dst, $dst, $dst, LSR #8\n\t"
            "orrw   $dst, $src1, $dst\n\t"
            "sxtb   $dst, $dst\t orr reduction16B"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ orr ($dst$$Register, $dst$$Register, $tmp$$Register);
    __ orr ($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ orrw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ orrw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 8);
    __ orrw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13254 
// XOR-reduce the 16 byte lanes of a 128-bit vector and fold in scalar src1:
//   dst = sxtb(src1 ^ b[0] ^ ... ^ b[15])
// Same halving strategy as reduce_and16B, using XORs.
instruct reduce_eor16B(iRegINoSp dst, iRegIorL2I src1, vecX src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (XorReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, D, 0\n\t"
            "umov   $dst, $src2, D, 1\n\t"
            "eor    $dst, $dst, $tmp\n\t"
            "eor    $dst, $dst, $dst, LSR #32\n\t"
            "eorw   $dst, $dst, $dst, LSR #16\n\t"
            "eorw   $dst, $dst, $dst, LSR #8\n\t"
            "eorw   $dst, $src1, $dst\n\t"
            "sxtb   $dst, $dst\t eor reduction16B"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ eor ($dst$$Register, $dst$$Register, $tmp$$Register);
    __ eor ($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ eorw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ eorw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 8);
    __ eorw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13282 
// AND-reduce the 4 short lanes of a 64-bit vector and fold in scalar src1:
//   dst = sxth(src1 & s[0] & s[1] & s[2] & s[3])
// The two 32-bit halves go to GPRs and are ANDed; one shifted AND folds the
// 16-bit halves, then src1 is folded in and sxth sign-extends the result.
instruct reduce_and4S(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (AndReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, S, 0\n\t"
            "umov   $dst, $src2, S, 1\n\t"
            "andw   $dst, $dst, $tmp\n\t"
            "andw   $dst, $dst, $dst, LSR #16\n\t"
            "andw   $dst, $src1, $dst\n\t"
            "sxth   $dst, $dst\t and reduction4S"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ andw($dst$$Register, $dst$$Register, $tmp$$Register);
    __ andw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ andw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13306 
// OR-reduce the 4 short lanes of a 64-bit vector and fold in scalar src1:
//   dst = sxth(src1 | s[0] | s[1] | s[2] | s[3])
// Same folding strategy as reduce_and4S, using ORs.
instruct reduce_orr4S(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (OrReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, S, 0\n\t"
            "umov   $dst, $src2, S, 1\n\t"
            "orrw   $dst, $dst, $tmp\n\t"
            "orrw   $dst, $dst, $dst, LSR #16\n\t"
            "orrw   $dst, $src1, $dst\n\t"
            "sxth   $dst, $dst\t orr reduction4S"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ orrw($dst$$Register, $dst$$Register, $tmp$$Register);
    __ orrw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ orrw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13330 
// XOR-reduce the 4 short lanes of a 64-bit vector and fold in scalar src1:
//   dst = sxth(src1 ^ s[0] ^ s[1] ^ s[2] ^ s[3])
// Same folding strategy as reduce_and4S, using XORs.
instruct reduce_eor4S(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (XorReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, S, 0\n\t"
            "umov   $dst, $src2, S, 1\n\t"
            "eorw   $dst, $dst, $tmp\n\t"
            "eorw   $dst, $dst, $dst, LSR #16\n\t"
            "eorw   $dst, $src1, $dst\n\t"
            "sxth   $dst, $dst\t eor reduction4S"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ eorw($dst$$Register, $dst$$Register, $tmp$$Register);
    __ eorw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ eorw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13354 
// AND-reduce the 8 short lanes of a 128-bit vector and fold in scalar src1:
//   dst = sxth(src1 & s[0] & ... & s[7])
// The two 64-bit halves go to GPRs and are combined with 64-bit ANDs
// (andr); shifted ANDs fold the 32- and 16-bit halves, then src1 is folded
// in and sxth sign-extends the surviving short.
instruct reduce_and8S(iRegINoSp dst, iRegIorL2I src1, vecX src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (AndReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, D, 0\n\t"
            "umov   $dst, $src2, D, 1\n\t"
            "andr   $dst, $dst, $tmp\n\t"
            "andr   $dst, $dst, $dst, LSR #32\n\t"
            "andw   $dst, $dst, $dst, LSR #16\n\t"
            "andw   $dst, $src1, $dst\n\t"
            "sxth   $dst, $dst\t and reduction8S"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ andr($dst$$Register, $dst$$Register, $tmp$$Register);
    __ andr($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ andw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ andw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13380 
// OR-reduce the 8 short lanes of a 128-bit vector and fold in scalar src1:
//   dst = sxth(src1 | s[0] | ... | s[7])
// Same halving strategy as reduce_and8S, using ORs.
instruct reduce_orr8S(iRegINoSp dst, iRegIorL2I src1, vecX src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (OrReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, D, 0\n\t"
            "umov   $dst, $src2, D, 1\n\t"
            "orr    $dst, $dst, $tmp\n\t"
            "orr    $dst, $dst, $dst, LSR #32\n\t"
            "orrw   $dst, $dst, $dst, LSR #16\n\t"
            "orrw   $dst, $src1, $dst\n\t"
            "sxth   $dst, $dst\t orr reduction8S"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ orr ($dst$$Register, $dst$$Register, $tmp$$Register);
    __ orr ($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ orrw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ orrw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13406 
// XOR-reduce the 8 short lanes of a 128-bit vector and fold in scalar src1:
//   dst = sxth(src1 ^ s[0] ^ ... ^ s[7])
// Same halving strategy as reduce_and8S, using XORs.
instruct reduce_eor8S(iRegINoSp dst, iRegIorL2I src1, vecX src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (XorReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, D, 0\n\t"
            "umov   $dst, $src2, D, 1\n\t"
            "eor    $dst, $dst, $tmp\n\t"
            "eor    $dst, $dst, $dst, LSR #32\n\t"
            "eorw   $dst, $dst, $dst, LSR #16\n\t"
            "eorw   $dst, $src1, $dst\n\t"
            "sxth   $dst, $dst\t eor reduction8S"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ eor ($dst$$Register, $dst$$Register, $tmp$$Register);
    __ eor ($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ eorw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ eorw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13432 
// AND-reduce the 2 int lanes of a 64-bit vector with scalar src1:
//   dst = src1 & v[0] & v[1]
// Each S lane is moved to a GPR with umov and ANDed in; no final extend is
// needed since lanes are already int-sized.
instruct reduce_and2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (AndReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "andw  $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "andw  $dst, $tmp, $dst\t and reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ andw($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ andw($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13452 
// OR-reduce the 2 int lanes of a 64-bit vector with scalar src1:
//   dst = src1 | v[0] | v[1]
// Same lane-by-lane strategy as reduce_and2I, using ORs.
instruct reduce_orr2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (OrReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "orrw  $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "orrw  $dst, $tmp, $dst\t orr reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ orrw($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ orrw($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13472 
// XOR-reduce the 2 int lanes of a 64-bit vector with scalar src1:
//   dst = src1 ^ v[0] ^ v[1]
// Same lane-by-lane strategy as reduce_and2I, using XORs.
instruct reduce_eor2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (XorReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "eorw  $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "eorw  $dst, $tmp, $dst\t eor reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ eorw($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ eorw($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13492 
// AND-reduce the 4 int lanes of a 128-bit vector with scalar src1:
//   dst = src1 & v[0] & v[1] & v[2] & v[3]
// The two 64-bit halves go to GPRs and are combined with 64-bit ANDs; a
// shifted AND folds the 32-bit halves, then src1 is ANDed in (32-bit).
instruct reduce_and4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (AndReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, D, 0\n\t"
            "umov   $dst, $src2, D, 1\n\t"
            "andr   $dst, $dst, $tmp\n\t"
            "andr   $dst, $dst, $dst, LSR #32\n\t"
            "andw   $dst, $src1, $dst\t and reduction4I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ andr($dst$$Register, $dst$$Register, $tmp$$Register);
    __ andr($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ andw($dst$$Register, $src1$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13514 
// OR-reduce the 4 int lanes of a 128-bit vector with scalar src1:
//   dst = src1 | v[0] | v[1] | v[2] | v[3]
// Same halving strategy as reduce_and4I, using ORs.
instruct reduce_orr4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (OrReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, D, 0\n\t"
            "umov   $dst, $src2, D, 1\n\t"
            "orr    $dst, $dst, $tmp\n\t"
            "orr    $dst, $dst, $dst, LSR #32\n\t"
            "orrw   $dst, $src1, $dst\t orr reduction4I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ orr ($dst$$Register, $dst$$Register, $tmp$$Register);
    __ orr ($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ orrw($dst$$Register, $src1$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13536 
// XOR-reduce the 4 int lanes of a 128-bit vector with scalar src1:
//   dst = src1 ^ v[0] ^ v[1] ^ v[2] ^ v[3]
// Same halving strategy as reduce_and4I, using XORs.
instruct reduce_eor4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (XorReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, D, 0\n\t"
            "umov   $dst, $src2, D, 1\n\t"
            "eor    $dst, $dst, $tmp\n\t"
            "eor    $dst, $dst, $dst, LSR #32\n\t"
            "eorw   $dst, $src1, $dst\t eor reduction4I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ eor ($dst$$Register, $dst$$Register, $tmp$$Register);
    __ eor ($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ eorw($dst$$Register, $src1$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13558 
// AND-reduce the 2 long lanes of a 128-bit vector with scalar src1:
//   dst = src1 & v[0] & v[1]
// Each D lane is moved to a GPR with umov and combined with 64-bit ANDs.
instruct reduce_and2L(iRegLNoSp dst, iRegL src1, vecX src2, iRegLNoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (AndReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov  $tmp, $src2, D, 0\n\t"
            "andr  $dst, $src1, $tmp\n\t"
            "umov  $tmp, $src2, D, 1\n\t"
            "andr  $dst, $dst, $tmp\t and reduction2L"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ andr($dst$$Register, $src1$$Register, $tmp$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ andr($dst$$Register, $dst$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13578 
// OR-reduce the 2 long lanes of a 128-bit vector with scalar src1:
//   dst = src1 | v[0] | v[1]
// Same lane-by-lane strategy as reduce_and2L, using ORs.
instruct reduce_orr2L(iRegLNoSp dst, iRegL src1, vecX src2, iRegLNoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (OrReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov  $tmp, $src2, D, 0\n\t"
            "orr   $dst, $src1, $tmp\n\t"
            "umov  $tmp, $src2, D, 1\n\t"
            "orr   $dst, $dst, $tmp\t orr reduction2L"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ orr ($dst$$Register, $src1$$Register, $tmp$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ orr ($dst$$Register, $dst$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13598 
// XOR-reduce the 2 long lanes of a 128-bit vector with scalar src1:
//   dst = src1 ^ v[0] ^ v[1]
// Same lane-by-lane strategy as reduce_and2L, using XORs.
instruct reduce_eor2L(iRegLNoSp dst, iRegL src1, vecX src2, iRegLNoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (XorReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov  $tmp, $src2, D, 0\n\t"
            "eor   $dst, $src1, $tmp\n\t"
            "umov  $tmp, $src2, D, 1\n\t"
            "eor   $dst, $dst, $tmp\t eor reduction2L"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ eor ($dst$$Register, $src1$$Register, $tmp$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ eor ($dst$$Register, $dst$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13618 
13619 // ------------------------------ Vector insert ---------------------------------
13620 
// Insert GPR val into byte lane idx of a 64-bit (8B) vector.
// src is first copied to dst (a vector orr of a register with itself is a
// move), skipped when the allocator already placed them in the same
// register; the target lane is then overwritten from val.
instruct insert8B(vecD dst, vecD src, iRegIorL2I val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  format %{ "orr    $dst, T8B, $src, $src\n\t"
            "mov    $dst, T8B, $idx, $val\t# insert into vector(8B)" %}
  ins_encode %{
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    }
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $idx$$constant, $val$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13637 
// Insert GPR val into byte lane idx of a 128-bit (16B) vector.
// Copy src to dst if needed (orr-with-self is a vector move), then set the
// target lane from val.
instruct insert16B(vecX dst, vecX src, iRegIorL2I val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  format %{ "orr    $dst, T16B, $src, $src\n\t"
            "mov    $dst, T16B, $idx, $val\t# insert into vector(16B)" %}
  ins_encode %{
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    }
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $idx$$constant, $val$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13654 
// Insert GPR val into halfword lane idx of a 64-bit (4S/4H) vector.
// Copy src to dst if needed (orr-with-self is a vector move), then set the
// target H lane from val.
instruct insert4S(vecD dst, vecD src, iRegIorL2I val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  format %{ "orr    $dst, T8B, $src, $src\n\t"
            "mov    $dst, T4H, $idx, $val\t# insert into vector(4S)" %}
  ins_encode %{
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    }
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $idx$$constant, $val$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13671 
// Insert GPR val into halfword lane idx of a 128-bit (8S/8H) vector.
// Copy src to dst if needed (orr-with-self is a vector move), then set the
// target H lane from val.
instruct insert8S(vecX dst, vecX src, iRegIorL2I val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  format %{ "orr    $dst, T16B, $src, $src\n\t"
            "mov    $dst, T8H, $idx, $val\t# insert into vector(8S)" %}
  ins_encode %{
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    }
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $idx$$constant, $val$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13688 
// Insert GPR val into word lane idx of a 64-bit (2I/2S) vector.
// Copy src to dst if needed (orr-with-self is a vector move), then set the
// target S lane from val.
instruct insert2I(vecD dst, vecD src, iRegIorL2I val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  format %{ "orr    $dst, T8B, $src, $src\n\t"
            "mov    $dst, T2S, $idx, $val\t# insert into vector(2I)" %}
  ins_encode %{
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    }
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $idx$$constant, $val$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13705 
// Insert GPR val into word lane idx of a 128-bit (4I/4S) vector.
// Copy src to dst if needed (orr-with-self is a vector move), then set the
// target S lane from val.
instruct insert4I(vecX dst, vecX src, iRegIorL2I val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  format %{ "orr    $dst, T16B, $src, $src\n\t"
            "mov    $dst, T4S, $idx, $val\t# insert into vector(4I)" %}
  ins_encode %{
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    }
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $idx$$constant, $val$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13722 
// Insert 64-bit GPR val into doubleword lane idx of a 128-bit (2L/2D)
// vector. Copy src to dst if needed (orr-with-self is a vector move), then
// set the target D lane from val.
instruct insert2L(vecX dst, vecX src, iRegL val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  format %{ "orr    $dst, T16B, $src, $src\n\t"
            "mov    $dst, T2D, $idx, $val\t# insert into vector(2L)" %}
  ins_encode %{
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    }
    __ mov(as_FloatRegister($dst$$reg), __ T2D, $idx$$constant, $val$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13739 
// Insert FP register val into float lane idx of a 64-bit (2F) vector.
// src is copied to dst unconditionally, then ins moves S element 0 of val
// into lane idx of dst. effect(TEMP_DEF dst) keeps dst distinct from the
// inputs, so the copy cannot clobber val (also an FP register) before the
// ins reads it.
instruct insert2F(vecD dst, vecD src, vRegF val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "orr    $dst, T8B, $src, $src\n\t"
            "ins    $dst, S, $val, $idx, 0\t# insert into vector(2F)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    __ ins(as_FloatRegister($dst$$reg), __ S,
           as_FloatRegister($val$$reg), $idx$$constant, 0);
  %}
  ins_pipe(pipe_class_default);
%}
13756 
// Insert FP register val into float lane idx of a 128-bit (4F) vector.
// Unconditional copy of src to dst, then ins moves S element 0 of val into
// lane idx. TEMP_DEF dst keeps dst distinct from the inputs so the copy
// cannot clobber val before the ins reads it.
instruct insert4F(vecX dst, vecX src, vRegF val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "orr    $dst, T16B, $src, $src\n\t"
            "ins    $dst, S, $val, $idx, 0\t# insert into vector(4F)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    __ ins(as_FloatRegister($dst$$reg), __ S,
           as_FloatRegister($val$$reg), $idx$$constant, 0);
  %}
  ins_pipe(pipe_class_default);
%}
13773 
// Insert a scalar double ($val) into lane $idx of a 128-bit vector of 2 doubles.
instruct insert2D(vecX dst, vecX src, vRegD val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "orr    $dst, T16B, $src, $src\n\t"
            "ins    $dst, D, $val, $idx, 0\t# insert into vector(2D)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    __ ins(as_FloatRegister($dst$$reg), __ D,
           as_FloatRegister($val$$reg), $idx$$constant, 0);
  %}
  ins_pipe(pipe_class_default);
%}
13790 
13791 // ------------------------------ Vector extract ---------------------------------
13792 
// Extract byte lane $idx from an 8B vector, sign-extended into a GP register (SMOV).
instruct extract8B(iRegINoSp dst, vecD src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 8);
  match(Set dst (ExtractB src idx));
  ins_cost(INSN_COST);
  format %{ "smov    $dst, $src, B, $idx\t# extract from vector(8B)" %}
  ins_encode %{
    __ smov($dst$$Register, as_FloatRegister($src$$reg), __ B, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
13804 
// Extract byte lane $idx from a 16B vector, sign-extended into a GP register (SMOV).
instruct extract16B(iRegINoSp dst, vecX src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 16);
  match(Set dst (ExtractB src idx));
  ins_cost(INSN_COST);
  format %{ "smov    $dst, $src, B, $idx\t# extract from vector(16B)" %}
  ins_encode %{
    __ smov($dst$$Register, as_FloatRegister($src$$reg), __ B, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
13816 
// Extract halfword lane $idx from a 4S (short) vector, sign-extended (SMOV H).
instruct extract4S(iRegINoSp dst, vecD src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 4);
  match(Set dst (ExtractS src idx));
  ins_cost(INSN_COST);
  format %{ "smov    $dst, $src, H, $idx\t# extract from vector(4S)" %}
  ins_encode %{
    __ smov($dst$$Register, as_FloatRegister($src$$reg), __ H, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
13828 
// Extract halfword lane $idx from an 8S (short) vector, sign-extended (SMOV H).
instruct extract8S(iRegINoSp dst, vecX src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 8);
  match(Set dst (ExtractS src idx));
  ins_cost(INSN_COST);
  format %{ "smov    $dst, $src, H, $idx\t# extract from vector(8S)" %}
  ins_encode %{
    __ smov($dst$$Register, as_FloatRegister($src$$reg), __ H, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
13840 
// Extract word lane $idx from a 2I vector; UMOV suffices since the lane is
// already 32 bits wide (no extension needed for an int result).
instruct extract2I(iRegINoSp dst, vecD src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 2);
  match(Set dst (ExtractI src idx));
  ins_cost(INSN_COST);
  format %{ "umov    $dst, $src, S, $idx\t# extract from vector(2I)" %}
  ins_encode %{
    __ umov($dst$$Register, as_FloatRegister($src$$reg), __ S, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
13852 
// Extract word lane $idx from a 4I vector into a GP register (UMOV S).
instruct extract4I(iRegINoSp dst, vecX src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 4);
  match(Set dst (ExtractI src idx));
  ins_cost(INSN_COST);
  format %{ "umov    $dst, $src, S, $idx\t# extract from vector(4I)" %}
  ins_encode %{
    __ umov($dst$$Register, as_FloatRegister($src$$reg), __ S, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
13864 
// Extract doubleword lane $idx from a 2L vector into a GP register (UMOV D).
instruct extract2L(iRegLNoSp dst, vecX src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 2);
  match(Set dst (ExtractL src idx));
  ins_cost(INSN_COST);
  format %{ "umov    $dst, $src, D, $idx\t# extract from vector(2L)" %}
  ins_encode %{
    __ umov($dst$$Register, as_FloatRegister($src$$reg), __ D, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
13876 
// Extract float lane $idx from a 2F vector into lane 0 of an FP register (INS).
instruct extract2F(vRegF dst, vecD src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 2);
  match(Set dst (ExtractF src idx));
  ins_cost(INSN_COST);
  format %{ "ins   $dst, S, $src, 0, $idx\t# extract from vector(2F)" %}
  ins_encode %{
    __ ins(as_FloatRegister($dst$$reg), __ S,
           as_FloatRegister($src$$reg), 0, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
13889 
// Extract float lane $idx from a 4F vector into lane 0 of an FP register (INS).
instruct extract4F(vRegF dst, vecX src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 4);
  match(Set dst (ExtractF src idx));
  ins_cost(INSN_COST);
  format %{ "ins   $dst, S, $src, 0, $idx\t# extract from vector(4F)" %}
  ins_encode %{
    __ ins(as_FloatRegister($dst$$reg), __ S,
           as_FloatRegister($src$$reg), 0, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
13902 
// Extract double lane $idx from a 2D vector into lane 0 of an FP register (INS).
instruct extract2D(vRegD dst, vecX src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 2);
  match(Set dst (ExtractD src idx));
  ins_cost(INSN_COST);
  format %{ "ins   $dst, D, $src, 0, $idx\t# extract from vector(2D)" %}
  ins_encode %{
    __ ins(as_FloatRegister($dst$$reg), __ D,
           as_FloatRegister($src$$reg), 0, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
13915 // END This section of the file is automatically generated. Do not edit --------------
13916 
13917 // ============================================================================
13918 // Floating Point Arithmetic Instructions
13919 
// Scalar single-precision add: dst = src1 + src2 (FADDS).
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
13934 
// Scalar double-precision add: dst = src1 + src2 (FADDD).
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13949 
// Scalar single-precision subtract: dst = src1 - src2 (FSUBS).
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
13964 
// Scalar double-precision subtract: dst = src1 - src2 (FSUBD).
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13979 
// Scalar single-precision multiply: dst = src1 * src2 (FMULS).
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
13994 
// Scalar double-precision multiply: dst = src1 * src2 (FMULD).
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
14009 
14010 // src1 * src2 + src3
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  // Fused multiply-add, only when FMA intrinsics are enabled (single rounding).
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
14026 
14027 // src1 * src2 + src3
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  // Fused multiply-add, only when FMA intrinsics are enabled (single rounding).
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
14043 
14044 // -src1 * src2 + src3
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  // FMSUB computes src3 - src1 * src2; either multiplicand may carry the
  // negation in the ideal graph, hence two match rules.
  // NOTE(review): folding NegF into FMSUB assumes fneg+fma == fmsub for all
  // inputs (incl. NaN/-0.0) — confirm against current upstream treatment.
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
14061 
14062 // -src1 * src2 + src3
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  // FMSUB computes src3 - src1 * src2; see msubF_reg_reg for the NegD caveat.
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
14079 
14080 // -src1 * src2 - src3
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  // FNMADD computes -(src1 * src2) - src3; both addend and one multiplicand
  // are negated in the ideal graph.
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
14097 
14098 // -src1 * src2 - src3
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  // FNMADD computes -(src1 * src2) - src3; see mnaddF_reg_reg.
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
14115 
14116 // src1 * src2 - src3
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  // FNMSUB computes src1 * src2 - src3; the ideal graph carries the negation
  // on the addend (NegF src3).
  // The unused immF0 operand (a leftover from an earlier (SubF zero src3)
  // match expression) has been removed; it appeared in neither the match
  // rule nor any effect.
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
14132 
14133 // src1 * src2 - src3
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  // FNMSUB computes src1 * src2 - src3; see mnsubF_reg_reg.
  // The unused immD0 operand (leftover from an earlier match expression)
  // has been removed.
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
  // n.b. MacroAssembler names the double-precision form fnmsub (not fnmsubd)
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
14150 
14151 
14152 // Math.max(FF)F
// Math.max(float, float) via FMAXS (IEEE 754-2008 NaN/-0.0 semantics).
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxF src1 src2));

  format %{ "fmaxs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
14165 
14166 // Math.min(FF)F
// Math.min(float, float) via FMINS.
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinF src1 src2));

  format %{ "fmins   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
14179 
14180 // Math.max(DD)D
// Math.max(double, double) via FMAXD.
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MaxD src1 src2));

  format %{ "fmaxd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
14193 
14194 // Math.min(DD)D
// Math.min(double, double) via FMIND.
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MinD src1 src2));

  format %{ "fmind   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
14207 
14208 
// Scalar single-precision divide: dst = src1 / src2 (FDIVS); high cost
// reflects the long-latency, unpipelined divider.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}
14223 
// Scalar double-precision divide: dst = src1 / src2 (FDIVD).
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
14238 
// Scalar single-precision negate (FNEGS).
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Format fixed to name the instruction actually emitted (fnegs, not fneg),
  // matching negD_reg_reg's style.
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
14252 
// Scalar double-precision negate (FNEGD).
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
14266 
// Scalar single-precision absolute value (FABSS).
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
14279 
// Scalar double-precision absolute value (FABSD).
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
14292 
// Scalar double-precision square root (FSQRTD).
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Fixed: was fp_div_s, but this is a double-precision op — use the
  // double-precision divide/sqrt pipeline class (it was swapped with sqrtF).
  ins_pipe(fp_div_d);
%}
14305 
// Scalar single-precision square root (FSQRTS).
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (SqrtF src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Fixed: was fp_div_d, but this is a single-precision op — use the
  // single-precision divide/sqrt pipeline class (it was swapped with sqrtD).
  ins_pipe(fp_div_s);
%}
14318 
14319 // Math.rint, floor, ceil
// Math.rint / floor / ceil on a double, selected by the rounding-mode
// constant: FRINTN (to nearest, ties to even), FRINTM (toward -inf),
// FRINTP (toward +inf).
instruct roundD_reg(vRegD dst, vRegD src, immI rmode) %{
  match(Set dst (RoundDoubleMode src rmode));
  format %{ "frint  $dst, $src, $rmode" %}
  ins_encode %{
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        __ frintnd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_floor:
        __ frintmd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_ceil:
        __ frintpd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      default:
        // An unrecognized mode previously fell through silently, emitting
        // nothing; fail fast instead.
        ShouldNotReachHere();
    }
  %}
  ins_pipe(fp_uop_d);
%}
14341 
14342 // ============================================================================
14343 // Logical Instructions
14344 
14345 // Integer Logical Instructions
14346 
14347 // And Instructions
14348 
14349 
// Int bitwise AND of two registers (ANDW).
// NOTE(review): the rFlagsReg cr operand appears in neither the match rule
// nor an effect — looks vestigial; confirm whether it can be dropped.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14364 
// Int bitwise AND with an encodable logical immediate (ANDW).
// NOTE(review): cr appears unused here, as in andI_reg_reg — confirm.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Format fixed: the encoding emits andw (flags untouched), not andsw.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    // uint64_t instead of unsigned long: the latter is 32-bit on LLP64
    // targets (Windows), truncating the immediate.
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14379 
14380 // Or Instructions
14381 
// Int bitwise OR of two registers (ORRW).
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14396 
// Int bitwise OR with an encodable logical immediate (ORRW).
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    // uint64_t instead of unsigned long (32-bit on LLP64 targets).
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14411 
14412 // Xor Instructions
14413 
// Int bitwise XOR of two registers (EORW).
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14428 
// Int bitwise XOR with an encodable logical immediate (EORW).
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    // uint64_t instead of unsigned long (32-bit on LLP64 targets).
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14443 
14444 // Long Logical Instructions
14445 // TODO
14446 
// Long bitwise AND of two registers (64-bit AND).
// NOTE(review): cr appears in neither the match rule nor an effect — confirm.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Annotation fixed: this is the long form, not int.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14461 
// Long bitwise AND with an encodable logical immediate.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Annotation fixed: this is the long form, not int.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    // uint64_t instead of unsigned long (32-bit on LLP64 targets).
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14476 
14477 // Or Instructions
14478 
// Long bitwise OR of two registers (64-bit ORR).
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  // Annotation fixed: this is the long form, not int.
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14493 
// Long bitwise OR with an encodable logical immediate.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  // Annotation fixed: this is the long form, not int.
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    // uint64_t instead of unsigned long (32-bit on LLP64 targets).
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14508 
14509 // Xor Instructions
14510 
// Long bitwise XOR of two registers (64-bit EOR).
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  // Annotation fixed: this is the long form, not int.
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14525 
// Long bitwise XOR with an encodable logical immediate.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  // Annotation fixed: this is the long form, not int.
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    // uint64_t instead of unsigned long (32-bit on LLP64 targets).
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14540 
// int -> long sign extension via SBFM #0, #31 (i.e. sxtw).
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
14552 
// This pattern occurs in BigInteger/BigDecimal arithmetic
// Unsigned int -> long: (long)i & 0xFFFFFFFF folds to a zero extension
// via UBFM #0, #31 (i.e. uxtw).
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14566 
// long -> int truncation: a 32-bit register move keeps the low word and
// zeroes the upper half.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
14579 
// int -> boolean: dst = (src != 0) ? 1 : 0, via compare-with-zero + cset.
// The compare clobbers the condition flags, hence KILL cr.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
14597 
// pointer -> boolean: dst = (src != NULL) ? 1 : 0 (64-bit compare + cset).
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
14615 
// double -> float narrowing conversion (FCVT, double source form).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}
14628 
// float -> double widening conversion (FCVT, single source form).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}
14641 
// float -> int, round toward zero (FCVTZS, 32-bit destination).
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}
14654 
// float -> long, round toward zero (FCVTZS, 64-bit destination).
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}
14667 
// int -> float, signed convert (SCVTF from 32-bit GP register).
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}
14680 
// long -> float, signed convert (SCVTF from 64-bit GP register).
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}
14693 
// double -> int, round toward zero (FCVTZS, 32-bit destination).
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}
14706 
// double -> long, round toward zero (FCVTZS, 64-bit destination).
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}
14719 
// int -> double, signed convert (SCVTF from 32-bit GP register).
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}
14732 
// long -> double, signed convert (SCVTF from 64-bit GP register).
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
14745 
14746 // stack <-> reg and reg <-> reg shuffles with no conversion
14747 
// Bit-preserving float -> int move, spilled operand: reload the 32-bit
// stack slot into a GP register (no conversion, raw bits).
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}
14765 
// Bit-preserving int -> float move, spilled operand: reload the 32-bit
// stack slot into an FP register (raw bits).
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14783 
// Bit-preserving double -> long move, spilled operand: reload the 64-bit
// stack slot into a GP register (raw bits).
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}
14801 
// Bit-preserving long -> double move, spilled operand: reload the 64-bit
// stack slot into an FP register (raw bits).
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14819 
// MoveF2I to stack: store the float's 32 raw bits from an FP register
// to the destination stack slot (strs to sp+disp); no conversion.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// MoveI2F to stack: store the int's 32 raw bits from a GP register to
// the destination stack slot (strw to sp+disp); no conversion.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
14855 
// MoveD2L to stack: store the double's 64 raw bits from an FP register
// to the destination stack slot (strd to sp+disp); no conversion.
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Operand order fixed: the store writes $src into $dst, matching the
  // encoding below and the sibling *_reg_stack rules (was "$dst, $src").
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14873 
// MoveL2D to stack: store the long's 64 raw bits from a GP register to
// the destination stack slot (str to sp+disp); no conversion.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
14891 
// MoveF2I reg-to-reg: copy the 32 raw bits of an FP register into a GP
// register with fmov (bitwise move, no conversion).
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// MoveI2F reg-to-reg: copy the 32 raw bits of a GP register into an FP
// register with fmov (bitwise move, no conversion).
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// MoveD2L reg-to-reg: copy the 64 raw bits of an FP register into a GP
// register with fmov (bitwise move, no conversion).
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// MoveL2D reg-to-reg: copy the 64 raw bits of a GP register into an FP
// register with fmov (bitwise move, no conversion).
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
14963 
14964 // ============================================================================
14965 // clearing of an array
14966 
// Zero $cnt words starting at $base. cnt must be in r11 and base in
// r10 (fixed-register operands); zero_words clobbers both.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  // KILL cr added: cr was declared but missing from effect(), hiding a
  // potential flags clobber inside zero_words from the register
  // allocator. Declaring the kill is conservatively safe either way.
  effect(USE_KILL cnt, USE_KILL base, KILL cr);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, $cnt$$Register);
  %}

  ins_pipe(pipe_class_memory);
%}
14981 
// Zero a compile-time-constant number of words starting at $base.
// Only used for small counts (below the block-zeroing threshold, per
// the predicate); base must be in r10 and is clobbered.
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  predicate((u_int64_t)n->in(2)->get_long()
            < (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
  match(Set dummy (ClearArray cnt base));
  // KILL cr added: cr was declared but missing from effect() (see
  // clearArray_reg_reg); declaring the kill is conservatively safe.
  effect(USE_KILL base, KILL cr);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
  %}

  ins_pipe(pipe_class_memory);
%}
14998 
14999 // ============================================================================
15000 // Overflow Math Instructions
15001 
// OverflowAddI (reg,reg): cmnw (adds to zr) sets V on signed 32-bit
// add overflow; the flags result feeds a cmpOp testing overflow.
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// OverflowAddI (reg,imm): same as above with an add/sub-encodable
// immediate second operand.
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// OverflowAddL (reg,reg): 64-bit variant using cmn.
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// OverflowAddL (reg,imm): 64-bit variant with add/sub immediate.
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
15053 
// OverflowSubI (reg,reg): cmpw sets V on signed 32-bit subtract
// overflow; the flags result feeds a cmpOp testing overflow.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// OverflowSubI (reg,imm): same with an add/sub-encodable immediate.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// OverflowSubL (reg,reg): 64-bit variant using cmp.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// OverflowSubL (reg,imm): 64-bit variant. subs with zr destination is
// the architectural expansion of cmp-with-immediate, so this matches
// the "cmp" shown in the format string.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ subs(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
15105 
// OverflowSubI with a zero first operand (i.e. integer negation):
// cmpw zr, op1 sets V when negating op1 would overflow.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// OverflowSubL with a zero first operand (long negation), 64-bit cmp.
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
15131 
// OverflowMulI producing a flags result: smull computes the full
// 64-bit product; comparing it with its own low-32-bit sign extension
// yields NE exactly when the product does not fit in 32 bits. The
// final movw/cselw/cmpw sequence translates that NE into the V flag
// (0x80000000 - 1 overflows), so a generic overflow cmpOp can test it.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
15152 
// Fused OverflowMulI + branch: when the If tests only overflow /
// no_overflow (see predicate), skip the flag-materialization sequence
// of overflowMulI_reg and branch directly on the NE/EQ produced by the
// sign-extension compare (NE <=> overflow, so VS maps to NE).
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
15174 
// OverflowMulL producing a flags result: mul gives product bits 0..63,
// smulh bits 64..127. No overflow iff the high half equals the sign
// extension of the low half (low>>63 arithmetic). As in
// overflowMulI_reg, the trailing movw/cselw/cmpw turns NE into VS.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
15197 
// Fused OverflowMulL + branch: as with overflowMulI_reg_branch, when
// the If only tests overflow / no_overflow we branch directly on the
// NE/EQ of the high-half-vs-sign-extension compare (NE <=> overflow).
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
15221 
15222 // ============================================================================
15223 // Compare Instructions
15224 
// CmpI (reg,reg): 32-bit signed compare setting the flags register.
// Also used via expand by minI_rReg / maxI_rReg below.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// CmpI against the constant zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// CmpI against an add/sub-encodable immediate (single instruction).
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// CmpI against an arbitrary immediate; costs more because the
// constant may need to be materialized first (hence 2 * INSN_COST).
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15280 
15281 // Unsigned compare Instructions; really, same as signed compare
15282 // except it should only be used to feed an If or a CMovI which takes a
15283 // cmpOpU.
15284 
// CmpU (reg,reg): same cmpw as the signed compare; only the flags
// interpretation differs, which is why cr is typed rFlagsRegU so it
// can only feed unsigned cmpOpU users.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// CmpU against the constant zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// CmpU against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// CmpU against an arbitrary immediate (constant may need to be
// materialized first).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15340 
// CmpL (reg,reg): 64-bit signed compare setting the flags register.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// CmpL against the constant zero.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// CmpL against an add/sub-encodable immediate (single instruction).
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// CmpL against an arbitrary immediate (constant may need to be
// materialized first, hence 2 * INSN_COST).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15396 
// CmpUL (reg,reg): unsigned 64-bit compare; same cmp encoding as the
// signed form but produces rFlagsRegU so only cmpOpU users consume it.
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// CmpUL against the constant zero.
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// CmpUL against an add/sub-encodable immediate.
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// CmpUL against an arbitrary immediate (constant may need to be
// materialized first).
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15452 
// CmpP (reg,reg): pointer compare; pointers compare unsigned, hence
// the rFlagsRegU result.
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// CmpN (reg,reg): compressed-oop (narrow pointer) compare.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null test: CmpP against the null constant.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed-oop null test: CmpN against the narrow null constant.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
15508 
15509 // FP comparisons
15510 //
15511 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
15512 // using normal cmpOp. See declaration of rFlagsReg for details.
15513 
// CmpF (reg,reg): single-precision FP compare; fcmps sets the normal
// integer flags register (see the comment block above).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// CmpF against the constant 0.0 (fcmps immediate-zero form).
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
// Double-precision variants of the FP compares above.
15542 
// CmpD (reg,reg): double-precision FP compare; fcmpd sets the normal
// integer flags register.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// CmpD against the constant 0.0 (fcmpd immediate-zero form).
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
15570 
// CmpF3: three-way float compare producing -1 / 0 / +1 in a GP
// register, with -1 for less-than and for unordered (NaN) inputs.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Format rewritten as assembly text; the old pseudo-C version had
  // stray, unbalanced parentheses ("csinvw($dst, ... eq\n\t").
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    // Dead "Label done" (bound but never branched to) removed.
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
15598 
// CmpD3: three-way double compare producing -1 / 0 / +1 in a GP
// register, with -1 for less-than and for unordered (NaN) inputs.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Format rewritten as assembly text; the old pseudo-C version had
  // stray, unbalanced parentheses.
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    // Dead "Label done" (bound but never branched to) removed.
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
15625 
// CmpF3 against the constant 0.0: -1 / 0 / +1 result, -1 for
// less-than or unordered (NaN) input.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Format rewritten as assembly text; the old pseudo-C version had
  // stray, unbalanced parentheses.
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    // Dead "Label done" (bound but never branched to) removed.
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
15652 
// CmpD3 against the constant 0.0: -1 / 0 / +1 result, -1 for
// less-than or unordered (NaN) input.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Format rewritten as assembly text; the old pseudo-C version had
  // stray, unbalanced parentheses.
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    // Dead "Label done" (bound but never branched to) removed.
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
15678 
// CmpLTMask: dst = (p < q) ? -1 : 0. csetw produces 0/1 from LT, then
// subtracting from zero turns 1 into the all-ones mask.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask against zero: an arithmetic shift right by 31 replicates
// the sign bit, directly yielding -1 when src < 0 and 0 otherwise.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
15715 
15716 // ============================================================================
15717 // Max and Min
15718 
// Effect-only conditional select used by the minI_rReg expand below:
// dst = LT ? src1 : src2 (no match rule of its own).
instruct cmovI_reg_reg_lt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect( DEF dst, USE src1, USE src2, USE cr );

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 lt\t"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(icond_reg_reg);
%}

// MinI expands to a signed compare followed by a LT conditional
// select (branch-free min).
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
%{
  match(Set dst (MinI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_lt(dst, src1, src2, cr);
  %}

%}
// MaxI counterpart of the min expansion above.
15749 
// Effect-only conditional select used by the maxI_rReg expand below:
// dst = GT ? src1 : src2 (no match rule of its own).
instruct cmovI_reg_reg_gt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect( DEF dst, USE src1, USE src2, USE cr );

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 gt\t"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(icond_reg_reg);
%}

// MaxI expands to a signed compare followed by a GT conditional
// select (branch-free max).
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
%{
  match(Set dst (MaxI src1 src2));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_gt(dst, src1, src2, cr);
  %}
%}
15777 
15778 // ============================================================================
15779 // Branch Instructions
15780 
15781 // Direct Branch.
// Unconditional direct branch (Goto).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
15795 
15796 // Conditional Near Branch
// Conditional near branch on signed-compare flags (b.cond).
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
15816 
15817 // Conditional Near Branch Unsigned
// Conditional near branch on unsigned-compare flags (b.cond with an
// unsigned condition code).
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
15837 
15838 // Make use of CBZ and CBNZ.  These instructions, as well as being
15839 // shorter than (cmp; branch), have the additional benefit of not
15840 // killing the flags.
15841 
15842 instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
15843   match(If cmp (CmpI op1 op2));
15844   effect(USE labl);
15845 
15846   ins_cost(BRANCH_COST);
15847   format %{ "cbw$cmp   $op1, $labl" %}
15848   ins_encode %{
15849     Label* L = $labl$$label;
15850     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
15851     if (cond == Assembler::EQ)
15852       __ cbzw($op1$$Register, *L);
15853     else
15854       __ cbnzw($op1$$Register, *L);
15855   %}
15856   ins_pipe(pipe_cmp_branch);
15857 %}
15858 
15859 instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
15860   match(If cmp (CmpL op1 op2));
15861   effect(USE labl);
15862 
15863   ins_cost(BRANCH_COST);
15864   format %{ "cb$cmp   $op1, $labl" %}
15865   ins_encode %{
15866     Label* L = $labl$$label;
15867     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
15868     if (cond == Assembler::EQ)
15869       __ cbz($op1$$Register, *L);
15870     else
15871       __ cbnz($op1$$Register, *L);
15872   %}
15873   ins_pipe(pipe_cmp_branch);
15874 %}
15875 
15876 instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
15877   match(If cmp (CmpP op1 op2));
15878   effect(USE labl);
15879 
15880   ins_cost(BRANCH_COST);
15881   format %{ "cb$cmp   $op1, $labl" %}
15882   ins_encode %{
15883     Label* L = $labl$$label;
15884     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
15885     if (cond == Assembler::EQ)
15886       __ cbz($op1$$Register, *L);
15887     else
15888       __ cbnz($op1$$Register, *L);
15889   %}
15890   ins_pipe(pipe_cmp_branch);
15891 %}
15892 
// Branch if narrow (compressed) oop == 0 / != 0: 32-bit cbzw/cbnzw.
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15909 
// NULL check of a DecodeN'd oop: the decoded oop is NULL exactly when the
// compressed form is zero, so test the 32-bit register and skip the decode.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15926 
// Unsigned int compare against zero: the conditions that can only be taken
// when op1 is zero (EQ, LS) branch via cbzw; their complements via cbnzw.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15943 
// Unsigned long compare against zero; same condition mapping as the
// int variant above, but with the 64-bit cbz/cbnz.
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15960 
15961 // Test bit and Branch
15962 
15963 // Patterns for short (< 32KiB) variants
// Signed "long < 0" / ">= 0" is just the sign bit: LT selects NE (branch if
// bit 63 set), GE selects EQ (branch if clear). Short (< 32KiB) tbz/tbnz form.
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
15979 
// Int variant of the sign-bit branch: test bit 31. Short-branch form.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
15995 
// Single-bit test on a long: (op1 & power-of-2-mask) ==/!= 0 becomes
// tbz/tbnz on that one bit. The predicate guarantees exactly one bit set.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);  // bit index of the single set bit
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16012 
// Int variant of the single-bit test branch. Short-branch form.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
16029 
16030 // And far variants
// Far variant of cmpL_branch_sign: the target may be beyond tbz/tbnz range,
// so tbr is told to emit the out-of-line (far) branch sequence.
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16045 
// Far variant of cmpI_branch_sign (sign bit 31, out-of-range target).
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16060 
// Far variant of cmpL_branch_bit (single-bit test, out-of-range target).
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16076 
// Far variant of cmpI_branch_bit (single-bit test, out-of-range target).
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16092 
16093 // Test bits
16094 
// Set flags from (op1 & imm) compared with zero: a single TST (ANDS with
// immediate). The predicate requires the constant to be encodable as a
// 64-bit logical (bitmask) immediate.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
16107 
// Set flags from (op1 & imm) compared with zero, 32-bit form: a single
// TSTW. The predicate requires a valid 32-bit logical (bitmask) immediate.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  // Was "tst": the encoding emits the 32-bit tstw (see cmpI_and_reg below),
  // so show the actual mnemonic in the disassembly format.
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
16120 
// Set flags from (op1 & op2) with a register mask: 64-bit TST (ANDS).
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
16131 
// Set flags from (op1 & op2) with a register mask: 32-bit TSTW.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
16142 
16143 
16144 // Conditional Far Branch
16145 // Conditional Far Branch Unsigned
16146 // TODO: fixme
16147 
16148 // counted loop end branch near
// Conditional branch closing a counted loop (signed condition), encoded
// as an ordinary near b.cond via the shared aarch64_enc_br_con encoding.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
16164 
16165 // counted loop end branch near Unsigned
// Unsigned-condition counted-loop-end branch; same shape as branchLoopEnd
// but uses the unsigned condition encoding.
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
16181 
16182 // counted loop end branch far
16183 // counted loop end branch far unsigned
16184 // TODO: fixme
16185 
16186 // ============================================================================
16187 // inlined locking and unlocking
16188 
// Inlined monitor enter (FastLock): object is the oop being locked, box
// the on-stack lock record; tmp/tmp2 are scratch. Result is in the flags.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
16203 
// Inlined monitor exit (FastUnlock); counterpart of cmpFastLock above.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
16216 
16217 
16218 // ============================================================================
16219 // Safepoint Instructions
16220 
16221 // TODO
16222 // provide a near and far version of this code
16223 
// Safepoint poll: a discarded load from the polling page ($poll), with a
// relocInfo::poll_type relocation so the runtime recognizes the poll site.
instruct safePoint(rFlagsReg cr, iRegP poll)
%{
  match(SafePoint poll);
  effect(KILL cr);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
16237 
16238 
16239 // ============================================================================
16240 // Procedure Call/Return Instructions
16241 
16242 // Call Java Static Instruction
16243 
// Direct call to a statically bound Java method, followed by the shared
// call epilog encoding.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
16259 
16260 // TO HERE
16261 
16262 // Call Java Dynamic Instruction
// Dynamically dispatched Java call (e.g. through an inline cache),
// followed by the shared call epilog encoding.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
16278 
16279 // Call Runtime Instruction
16280 
// Call from compiled Java code into the VM runtime.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16295 
16296 // Call Runtime Instruction
16297 
// Call to a runtime leaf routine (no safepoint/Java-state transition);
// shares the java_to_runtime call encoding.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16312 
16313 // Call Runtime Instruction
16314 
// Call to a runtime leaf routine that does not use floating point.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16329 
16330 // Tail Call; Jump from runtime stub to Java code.
16331 // Also known as an 'interprocedural jump'.
16332 // Target of jump will eventually return to caller.
16333 // TailJump below removes the return address.
// Indirect tail call: jump to jump_target with the method oop in the
// inline-cache register; target eventually returns to our caller.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
16346 
// Indirect tail jump used for exception dispatch: the exception oop is
// pinned in r0 and the return address is removed (see TailCall comment).
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
16359 
16360 // Create exception oop: created by stack-crawling runtime code.
16361 // Created exception is now available to this handler, and is setup
16362 // just prior to jumping to this handler. No code emitted.
16363 // TODO check
16364 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Materialize the exception oop produced by stack-crawling runtime code:
// it is already in r0 on entry, so this is a zero-size, no-code node.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
16377 
16378 // Rethrow exception: The exception oop will come in the first
16379 // argument position. Then JUMP (not call) to the rethrow stub code.
// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
16390 
16391 
16392 // Return Instruction
16393 // epilog node loads ret address into lr as part of frame pop
// Method return; the epilog node has already reloaded lr during frame pop,
// so this is just the ret instruction.
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
16404 
16405 // Die now.
// Halt: emit a trapping instruction (dpcs1 with a recognizable payload);
// execution must never reach this point.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // +1 so NativeInstruction::is_sigill_zombie_not_entrant() doesn't
    // return true
    __ dpcs1(0xdead + 1);
  %}

  ins_pipe(pipe_class_default);
%}
16420 
16421 // ============================================================================
16422 // Partial Subtype Check
16423 //
// Scan the subklass's secondary-supers (superklass) array for an instance
// of the superklass.  Set a hidden internal cache on a hit (cache is
// checked with exposed code in gen_subtype_check()).  Return NZ for a
// miss or zero for a hit.  The encoding ALSO sets flags.
16428 
// Partial subtype check producing a result register (zero on hit,
// non-zero on miss); register placement is fixed (r4/r0/r2/r5).
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
16443 
// Variant matched when the check result is only compared against zero:
// the flags set by the encoding are the result, so the result register
// need not be zeroed on a hit (opcode 0x0).
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
16458 
// String.compareTo intrinsic, both strings UTF-16 (UU). No vector temps
// are needed for the same-encoding cases, hence fnoreg placeholders.
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16476 
// String.compareTo intrinsic, both strings Latin-1 (LL).
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16493 
// String.compareTo intrinsic, mixed encodings (str1 UTF-16, str2 Latin-1):
// requires vector temps v0-v2 for the inflate-and-compare loop.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16513 
// String.compareTo intrinsic, mixed encodings (str1 Latin-1, str2 UTF-16);
// mirror of string_compareUL above, with the LU intrinsic id.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
16533 
// String.indexOf intrinsic with a variable-length needle, both UTF-16 (UU).
// icnt2 == -1 tells string_indexof the needle length is in a register.
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16554 
// String.indexOf intrinsic, variable-length needle, both Latin-1 (LL).
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16575 
// String.indexOf intrinsic, variable-length needle, mixed encodings (UL).
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16596 
// String.indexOf with a small constant-length needle (<= 4 chars), both
// UTF-16: the constant count is baked in, so fewer temps are needed and
// the needle-count register argument is zr.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;  // compile-time needle length
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16617 
// Constant-needle (<= 4) String.indexOf, both Latin-1 (LL).
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16638 
// Constant-needle String.indexOf, mixed encodings (UL); note the constant
// operand is immI_1, so only a single-char needle is matched here.
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16659 
// StringUTF16.indexOf(char) intrinsic: search cnt1 chars of str1 for the
// single char value in ch.
instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16677 
// String.equals intrinsic, Latin-1 (LL): element size 1 byte.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
16693 
// String.equals intrinsic, UTF-16 (UU): element size 2 bytes.
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
16709 
// Arrays.equals intrinsic for byte[] (LL encoding, element size 1).
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 1);
    %}
  ins_pipe(pipe_class_memory);
%}
16726 
// Arrays.equals intrinsic for char[] (UU encoding, element size 2).
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
16743 
// Intrinsic: test whether a byte[] region contains any byte with the
// high bit set (i.e. negative as a signed byte).  ary1 and len arrive
// in fixed registers and are clobbered by the MacroAssembler stub.
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (HasNegatives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "has negatives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
16754 
// fast char[] to byte[] compression
// Compresses UTF-16 chars down to Latin-1 bytes using SIMD temps V0-V3;
// src/dst/len arrive in fixed registers and are clobbered by the stub.
// result (R0) reports how the compression went (see char_array_compress).
// NOTE(review): the format text says "KILL R1, R2, R3, R4", but the
// effect() list clobbers R1 (dst), R2 (src), R3 (len) and V0-V3 only --
// R4 does not appear anywhere in this instruct; confirm the comment.
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 tmp1, vRegD_V1 tmp2,
                         vRegD_V2 tmp3, vRegD_V3 tmp4,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Compress $src,$dst -> $result    // KILL R1, R2, R3, R4" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                           $tmp3$$FloatRegister, $tmp4$$FloatRegister,
                           $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
16773 
// fast byte[] to char[] inflation
// Inflates Latin-1 bytes to UTF-16 chars via MacroAssembler::
// byte_array_inflate, using SIMD temps V0-V2 plus integer temp R3.
// src/dst/len arrive in fixed registers and are clobbered by the stub;
// there is no result value (the node produces only memory effects).
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
                        vRegD_V0 tmp1, vRegD_V1 tmp2, vRegD_V2 tmp3, iRegP_R3 tmp4, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  // Debug-listing text: list all four temps; the previous text mentioned
  // only $tmp1/$tmp2 even though tmp3 and tmp4 are clobbered as well
  // (see the effect() clause above).
  format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2, $tmp3, $tmp4" %}
  ins_encode %{
    __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                          $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16788 
// encode char[] to byte[] in ISO_8859_1
// Encodes UTF-16 chars into ISO-8859-1 bytes using SIMD temps V0-V3;
// src/dst/len arrive in fixed registers and are clobbered, as are the
// vector temps (KILL rather than TEMP here).  result lands in R0.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
16807 
16808 // ============================================================================
16809 // This name is KNOWN by the ADLC and cannot be changed.
16810 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
16811 // for this guy.
16812 instruct tlsLoadP(thread_RegP dst)
16813 %{
16814   match(Set dst (ThreadLocal));
16815 
16816   ins_cost(0);
16817 
16818   format %{ " -- \t// $dst=Thread::current(), empty" %}
16819 
16820   size(0);
16821 
16822   ins_encode( /*empty*/ );
16823 
16824   ins_pipe(pipe_class_empty);
16825 %}
16826 
// ====================VECTOR INSTRUCTIONS=====================================

// Reinterpret an 8-byte vector as another 8-byte vector type.
// Source and destination are the same register (dst appears on both
// sides of the match), so no instruction is needed.
instruct reinterpretD(vecD dst) %{
  predicate(n->bottom_type()->is_vect()->length_in_bytes() == 8 &&
            n->in(1)->bottom_type()->is_vect()->length_in_bytes() == 8);
  match(Set dst (VectorReinterpret dst));
  ins_cost(0);
  format %{ " # reinterpret $dst" %}
  ins_encode %{
    // empty
  %}
  ins_pipe(pipe_class_empty);
%}
16840 
// Reinterpret an 8-byte vector as a 16-byte vector.  Implemented as a
// 64-bit register move (orr of src with itself) when the registers differ.
instruct reinterpretD2X(vecX dst, vecD src) %{
  predicate(n->bottom_type()->is_vect()->length_in_bytes() == 16 &&
            n->in(1)->bottom_type()->is_vect()->length_in_bytes() == 8);
  match(Set dst (VectorReinterpret src));
  ins_cost(INSN_COST);
  format %{ " # reinterpret $dst,$src" %}
  ins_encode %{
    // If register is the same, then move is not needed.
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    }
  %}
  ins_pipe(vlogical64);
%}
16856 
// Reinterpret a 16-byte vector as an 8-byte vector (narrowing view).
// Only the low 64 bits are moved (T8B arrangement) when registers differ.
instruct reinterpretX2D(vecD dst, vecX src) %{
  predicate(n->bottom_type()->is_vect()->length_in_bytes() == 8 &&
            n->in(1)->bottom_type()->is_vect()->length_in_bytes() == 16);
  match(Set dst (VectorReinterpret src));
  ins_cost(INSN_COST);
  format %{ " # reinterpret $dst,$src" %}
  ins_encode %{
    // If register is the same, then move is not needed.
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    }
  %}
  ins_pipe(vlogical64);
%}
16873 
// Reinterpret a 16-byte vector as another 16-byte vector type.
// Same register on both sides, so no instruction is emitted.
instruct reinterpretX(vecX dst) %{
  predicate(n->bottom_type()->is_vect()->length_in_bytes() == 16 &&
            n->in(1)->bottom_type()->is_vect()->length_in_bytes() == 16);
  match(Set dst (VectorReinterpret dst));
  ins_cost(0);
  format %{ " # reinterpret $dst" %}
  ins_encode %{
    // empty
  %}
  ins_pipe(pipe_class_empty);
%}
16885 
// Vector loads, one instruct per memory size (2/4/8/16 bytes), each
// delegating to the corresponding aarch64_enc_ldrv* encoding class.
// NOTE(review): loadV2 uses the generic 'memory' operand while the
// larger loads use size-specific vmem4/vmem8/vmem16 operands --
// presumably the vmemN operands constrain the addressing mode for the
// scaled-immediate form; confirm against the operand definitions.

// Load vector (16 bits)
instruct loadV2(vecD dst, memory mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 2);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrh   $dst,$mem\t# vector (16 bits)" %}
  ins_encode( aarch64_enc_ldrvH(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load vector (32 bits)
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load vector (64 bits)
instruct loadV8(vecD dst, vmem8 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load Vector (128 bits)
instruct loadV16(vecX dst, vmem16 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}
16929 
// Vector stores, one instruct per memory size (2/4/8/16 bytes),
// mirroring the vector loads above via the aarch64_enc_strv* encoding
// classes.  As with loadV2, storeV2 uses the generic 'memory' operand
// while the larger stores use size-specific vmemN operands.

// Store Vector (16 bits)
instruct storeV2(vecD src, memory mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 2);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strh   $mem,$src\t# vector (16 bits)" %}
  ins_encode( aarch64_enc_strvH(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (32 bits)
instruct storeV4(vecD src, vmem4 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (64 bits)
instruct storeV8(vecD src, vmem8 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (128 bits)
instruct storeV16(vecX src, vmem16 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
16973 
// ReplicateB: broadcast a byte (from a GP register or an immediate)
// into every lane of a 64-bit (8B) or 128-bit (16B) vector.

// Byte splat, 64-bit vector; also covers 4-byte vectors (length 4).
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// Byte splat, 128-bit vector.
instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Immediate byte splat, 64-bit vector; constant masked to 8 bits.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// Immediate byte splat, 128-bit vector; constant masked to 8 bits.
instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
17023 
// ReplicateS: broadcast a 16-bit short (register or immediate) into
// every lane of a 64-bit (4H) or 128-bit (8H) vector.

// Short splat, 64-bit vector; also covers 2-element vectors.
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// Short splat, 128-bit vector.
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Immediate short splat, 64-bit vector; constant masked to 16 bits.
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// Immediate short splat, 128-bit vector; constant masked to 16 bits.
instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
17073 
// ReplicateI: broadcast a 32-bit int (register or immediate) into
// every lane of a 64-bit (2S) or 128-bit (4S) vector.

// Int splat, 64-bit vector.
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// Int splat, 128-bit vector.
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Immediate int splat, 64-bit vector; no masking needed at 32 bits.
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// Immediate int splat, 128-bit vector.
instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
17121 
// ReplicateL: broadcast a 64-bit long into both lanes of a 128-bit vector.
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Zero a 128-bit vector by xor-ing it with itself.
// NOTE(review): matches (ReplicateI zero), not ReplicateL -- presumably
// the matcher funnels the 2L-zero case through ReplicateI; confirm.
// NOTE(review): format text says "movi" but the encoding emits eor
// (dst ^ dst); the debug listing does not match the emitted instruction.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
17147 
// ReplicateF/ReplicateD: broadcast a float/double from an FP register
// into every lane of the destination vector.

// Float splat, 64-bit vector (2 x 32-bit lanes).
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}

// Float splat, 128-bit vector (4 x 32-bit lanes).
instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}

// Double splat, 128-bit vector (2 x 64-bit lanes).
instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
17186 
// ====================REDUCTION ARITHMETIC====================================

// AddReductionVI over byte lanes: addv sums all lanes into lane 0,
// smov sign-extends lane 0 out to a GP register, addw folds in the
// scalar input, and sxtb re-narrows the sum to byte range.

// Byte add-reduction, 64-bit (8B) vector source.
instruct reduce_add8B(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecD tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "addv  $tmp, T8B, $src2\n\t"
            "smov  $dst, $tmp, B, 0\n\t"
            "addw  $dst, $dst, $src1\n\t"
            "sxtb  $dst, $dst\t add reduction8B"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T8B, as_FloatRegister($src2$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ addw($dst$$Register, $dst$$Register, $src1$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Byte add-reduction, 128-bit (16B) vector source.
instruct reduce_add16B(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "addv  $tmp, T16B, $src2\n\t"
            "smov  $dst, $tmp, B, 0\n\t"
            "addw  $dst, $dst, $src1\n\t"
            "sxtb  $dst, $dst\t add reduction16B"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T16B, as_FloatRegister($src2$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ addw($dst$$Register, $dst$$Register, $src1$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
17228 
// AddReductionVI over short lanes: same scheme as the byte reductions
// above, but with H-sized lanes and sxth for the final narrowing.

// Short add-reduction, 64-bit (4H) vector source.
instruct reduce_add4S(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecD tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "addv  $tmp, T4H, $src2\n\t"
            "smov  $dst, $tmp, H, 0\n\t"
            "addw  $dst, $dst, $src1\n\t"
            "sxth  $dst, $dst\t add reduction4S"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4H, as_FloatRegister($src2$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ H, 0);
    __ addw($dst$$Register, $dst$$Register, $src1$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Short add-reduction, 128-bit (8H) vector source.
instruct reduce_add8S(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "addv  $tmp, T8H, $src2\n\t"
            "smov  $dst, $tmp, H, 0\n\t"
            "addw  $dst, $dst, $src1\n\t"
            "sxth  $dst, $dst\t add reduction8S"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T8H, as_FloatRegister($src2$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ H, 0);
    __ addw($dst$$Register, $dst$$Register, $src1$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
17268 
// AddReductionVI over int lanes.  No final narrowing is needed since
// the lanes are already 32 bits wide.

// Int add-reduction, 64-bit (2S) source: extract both lanes to GP
// registers and add them together with the scalar input.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, iRegINoSp tmp, iRegINoSp tmp2)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (AddReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $vsrc, S, 0\n\t"
            "umov  $tmp2, $vsrc, S, 1\n\t"
            "addw  $tmp, $isrc, $tmp\n\t"
            "addw  $dst, $tmp, $tmp2\t# add reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($vsrc$$reg), __ S, 1);
    __ addw($tmp$$Register, $isrc$$Register, $tmp$$Register);
    __ addw($dst$$Register, $tmp$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Int add-reduction, 128-bit (4S) source: addv sums the four lanes,
// then the scalar input is folded in.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, vecX vtmp, iRegINoSp itmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (AddReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP vtmp, TEMP itmp);
  format %{ "addv  $vtmp, T4S, $vsrc\n\t"
            "umov  $itmp, $vtmp, S, 0\n\t"
            "addw  $dst, $itmp, $isrc\t# add reduction4I"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($vtmp$$reg), __ T4S,
            as_FloatRegister($vsrc$$reg));
    __ umov($itmp$$Register, as_FloatRegister($vtmp$$reg), __ S, 0);
    __ addw($dst$$Register, $itmp$$Register, $isrc$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
17307 
// MulReductionVI over byte lanes.  Log-step tree reduction: each
// ins/mulv pair moves the upper half of the active elements alongside
// the lower half and multiplies them pairwise, halving the element
// count, until two lanes remain; those are extracted and multiplied
// with the scalar input, with sxtb re-narrowing after each multiply.

// Byte mul-reduction, 64-bit (8B) source: 8 -> 4 -> 2 lanes, then scalar.
instruct reduce_mul8B(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecD vtmp1, vecD vtmp2, iRegINoSp itmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP vtmp1, TEMP vtmp2, TEMP itmp);
  format %{ "ins   $vtmp1, S, $src2, 0, 1\n\t"
            "mulv  $vtmp1, T8B, $vtmp1, $src2\n\t"
            "ins   $vtmp2, H, $vtmp1, 0, 1\n\t"
            "mulv  $vtmp2, T8B, $vtmp2, $vtmp1\n\t"
            "umov  $itmp, $vtmp2, B, 0\n\t"
            "mulw  $dst, $itmp, $src1\n\t"
            "sxtb  $dst, $dst\n\t"
            "umov  $itmp, $vtmp2, B, 1\n\t"
            "mulw  $dst, $itmp, $dst\n\t"
            "sxtb  $dst, $dst\t mul reduction8B"
  %}
  ins_encode %{
    // Fold upper 4 bytes onto lower 4: 8 active lanes -> 4.
    __ ins(as_FloatRegister($vtmp1$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp1$$reg), __ T8B,
            as_FloatRegister($vtmp1$$reg), as_FloatRegister($src2$$reg));
    // Fold upper 2 bytes onto lower 2: 4 active lanes -> 2.
    __ ins(as_FloatRegister($vtmp2$$reg), __ H,
           as_FloatRegister($vtmp1$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp2$$reg), __ T8B,
            as_FloatRegister($vtmp2$$reg), as_FloatRegister($vtmp1$$reg));
    // Extract the last two lanes and combine with the scalar input.
    __ umov($itmp$$Register, as_FloatRegister($vtmp2$$reg), __ B, 0);
    __ mulw($dst$$Register, $itmp$$Register, $src1$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
    __ umov($itmp$$Register, as_FloatRegister($vtmp2$$reg), __ B, 1);
    __ mulw($dst$$Register, $itmp$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Byte mul-reduction, 128-bit (16B) source: one extra tree level
// (16 -> 8 -> 4 -> 2 lanes) before the scalar combine.
instruct reduce_mul16B(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX vtmp1, vecX vtmp2, iRegINoSp itmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP vtmp1, TEMP vtmp2, TEMP itmp);
  format %{ "ins   $vtmp1, D, $src2, 0, 1\n\t"
            "mulv  $vtmp1, T8B, $vtmp1, $src2\n\t"
            "ins   $vtmp2, S, $vtmp1, 0, 1\n\t"
            "mulv  $vtmp1, T8B, $vtmp2, $vtmp1\n\t"
            "ins   $vtmp2, H, $vtmp1, 0, 1\n\t"
            "mulv  $vtmp2, T8B, $vtmp2, $vtmp1\n\t"
            "umov  $itmp, $vtmp2, B, 0\n\t"
            "mulw  $dst, $itmp, $src1\n\t"
            "sxtb  $dst, $dst\n\t"
            "umov  $itmp, $vtmp2, B, 1\n\t"
            "mulw  $dst, $itmp, $dst\n\t"
            "sxtb  $dst, $dst\t mul reduction16B"
  %}
  ins_encode %{
    // 16 active lanes -> 8 (move upper doubleword down, multiply).
    __ ins(as_FloatRegister($vtmp1$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp1$$reg), __ T8B,
            as_FloatRegister($vtmp1$$reg), as_FloatRegister($src2$$reg));
    // 8 -> 4.
    __ ins(as_FloatRegister($vtmp2$$reg), __ S,
           as_FloatRegister($vtmp1$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp1$$reg), __ T8B,
            as_FloatRegister($vtmp2$$reg), as_FloatRegister($vtmp1$$reg));
    // 4 -> 2.
    __ ins(as_FloatRegister($vtmp2$$reg), __ H,
           as_FloatRegister($vtmp1$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp2$$reg), __ T8B,
            as_FloatRegister($vtmp2$$reg), as_FloatRegister($vtmp1$$reg));
    // Extract the last two lanes and combine with the scalar input.
    __ umov($itmp$$Register, as_FloatRegister($vtmp2$$reg), __ B, 0);
    __ mulw($dst$$Register, $itmp$$Register, $src1$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
    __ umov($itmp$$Register, as_FloatRegister($vtmp2$$reg), __ B, 1);
    __ mulw($dst$$Register, $itmp$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
17385 
// MulReductionVI over short lanes: same log-step tree reduction as the
// byte variants, with H-sized lanes and sxth narrowing.

// Short mul-reduction, 64-bit (4H) source: 4 -> 2 lanes, then scalar.
instruct reduce_mul4S(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecD vtmp, iRegINoSp itmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP vtmp, TEMP itmp);
  format %{ "ins   $vtmp, S, $src2, 0, 1\n\t"
            "mulv  $vtmp, T4H, $vtmp, $src2\n\t"
            "umov  $itmp, $vtmp, H, 0\n\t"
            "mulw  $dst, $itmp, $src1\n\t"
            "sxth  $dst, $dst\n\t"
            "umov  $itmp, $vtmp, H, 1\n\t"
            "mulw  $dst, $itmp, $dst\n\t"
            "sxth  $dst, $dst\t mul reduction4S"
  %}
  ins_encode %{
    // Fold upper pair of shorts onto lower pair: 4 active lanes -> 2.
    __ ins(as_FloatRegister($vtmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp$$reg), __ T4H,
            as_FloatRegister($vtmp$$reg), as_FloatRegister($src2$$reg));
    // Extract the last two lanes and combine with the scalar input.
    __ umov($itmp$$Register, as_FloatRegister($vtmp$$reg), __ H, 0);
    __ mulw($dst$$Register, $itmp$$Register, $src1$$Register);
    __ sxth($dst$$Register, $dst$$Register);
    __ umov($itmp$$Register, as_FloatRegister($vtmp$$reg), __ H, 1);
    __ mulw($dst$$Register, $itmp$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Short mul-reduction, 128-bit (8H) source: 8 -> 4 -> 2 lanes, then scalar.
instruct reduce_mul8S(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX vtmp1, vecX vtmp2, iRegINoSp itmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP vtmp1, TEMP vtmp2, TEMP itmp);
  format %{ "ins   $vtmp1, D, $src2, 0, 1\n\t"
            "mulv  $vtmp1, T4H, $vtmp1, $src2\n\t"
            "ins   $vtmp2, S, $vtmp1, 0, 1\n\t"
            "mulv  $vtmp2, T4H, $vtmp2, $vtmp1\n\t"
            "umov  $itmp, $vtmp2, H, 0\n\t"
            "mulw  $dst, $itmp, $src1\n\t"
            "sxth  $dst, $dst\n\t"
            "umov  $itmp, $vtmp2, H, 1\n\t"
            "mulw  $dst, $itmp, $dst\n\t"
            "sxth  $dst, $dst\t mul reduction8S"
  %}
  ins_encode %{
    // 8 active lanes -> 4 (move upper doubleword down, multiply).
    __ ins(as_FloatRegister($vtmp1$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp1$$reg), __ T4H,
            as_FloatRegister($vtmp1$$reg), as_FloatRegister($src2$$reg));
    // 4 -> 2.
    __ ins(as_FloatRegister($vtmp2$$reg), __ S,
           as_FloatRegister($vtmp1$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp2$$reg), __ T4H,
            as_FloatRegister($vtmp2$$reg), as_FloatRegister($vtmp1$$reg));
    // Extract the last two lanes and combine with the scalar input.
    __ umov($itmp$$Register, as_FloatRegister($vtmp2$$reg), __ H, 0);
    __ mulw($dst$$Register, $itmp$$Register, $src1$$Register);
    __ sxth($dst$$Register, $dst$$Register);
    __ umov($itmp$$Register, as_FloatRegister($vtmp2$$reg), __ H, 1);
    __ mulw($dst$$Register, $itmp$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
17451 
// MulReductionVI over int lanes.  No narrowing needed at 32 bits.
// NOTE(review): both format strings below end with a trailing "\n\t";
// cosmetic only (debug listing), but inconsistent with the other
// reductions in this section.

// Int mul-reduction, 64-bit (2S) source: extract both lanes and
// multiply them with the scalar input.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MulReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $vsrc, S, 0\n\t"
            "mul   $dst, $tmp, $isrc\n\t"
            "umov  $tmp, $vsrc, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t# mul reduction2I\n\t"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $isrc$$Register);
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Int mul-reduction, 128-bit (4S) source: one tree step (4 -> 2 lanes)
// then extract and multiply with the scalar input.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, vecX vtmp, iRegINoSp itmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MulReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP vtmp, TEMP itmp, TEMP dst);
  format %{ "ins   $vtmp, D, $vsrc, 0, 1\n\t"
            "mulv  $vtmp, T2S, $vtmp, $vsrc\n\t"
            "umov  $itmp, $vtmp, S, 0\n\t"
            "mul   $dst, $itmp, $isrc\n\t"
            "umov  $itmp, $vtmp, S, 1\n\t"
            "mul   $dst, $itmp, $dst\t# mul reduction4I\n\t"
  %}
  ins_encode %{
    // Fold upper pair of ints onto lower pair: 4 active lanes -> 2.
    __ ins(as_FloatRegister($vtmp$$reg), __ D,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp$$reg), __ T2S,
            as_FloatRegister($vtmp$$reg), as_FloatRegister($vsrc$$reg));
    // Extract the last two lanes and combine with the scalar input.
    __ umov($itmp$$Register, as_FloatRegister($vtmp$$reg), __ S, 0);
    __ mul($dst$$Register, $itmp$$Register, $isrc$$Register);
    __ umov($itmp$$Register, as_FloatRegister($vtmp$$reg), __ S, 1);
    __ mul($dst$$Register, $itmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
17497 
// AddReductionVF: strictly-ordered float add-reduction.  Lanes are
// added one at a time (fadds dst, dst, lane) rather than via addv so
// the result matches sequential Java floating-point addition order.

// Float add-reduction, 64-bit (2-lane) source.
instruct reduce_add2F(vRegF dst, vRegF fsrc, vecD vsrc, vecD tmp)
%{
  match(Set dst (AddReductionVF fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t# add reduction2F"
  %}
  ins_encode %{
    // dst = scalar input + lane 0.
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    // Bring lane 1 down to element 0 of tmp, then accumulate it.
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Float add-reduction, 128-bit (4-lane) source; lanes 1-3 are each
// moved down to element 0 of tmp and accumulated in order.
instruct reduce_add4F(vRegF dst, vRegF fsrc, vecX vsrc, vecX tmp)
%{
  match(Set dst (AddReductionVF fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $vsrc, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $vsrc, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t# add reduction4F"
  %}
  ins_encode %{
    // dst = scalar input + lane 0.
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    // Accumulate lane 1.
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    // Accumulate lane 2.
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    // Accumulate lane 3.
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
17549 
// Multiply reduction of 2 floats plus a scalar input; scalar fmuls in
// lane order (same lane-by-lane structure as reduce_add2F).
instruct reduce_mul2F(vRegF dst, vRegF fsrc, vecD vsrc, vecD tmp)
%{
  match(Set dst (MulReductionVF fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t# mul reduction2F"
  %}
  ins_encode %{
    // dst = fsrc * vsrc[0]
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    // dst *= vsrc[1]
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply reduction of 4 floats plus a scalar input; each lane moved to
// tmp[0] and folded in with a scalar fmuls, left to right.
instruct reduce_mul4F(vRegF dst, vRegF fsrc, vecX vsrc, vecX tmp)
%{
  match(Set dst (MulReductionVF fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $vsrc, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $vsrc, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t# mul reduction4F"
  %}
  ins_encode %{
    // dst = fsrc * vsrc[0]
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    // dst *= vsrc[1]
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    // dst *= vsrc[2]
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    // dst *= vsrc[3]
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
17601 
// Add reduction of 2 longs plus a scalar input: pairwise-add the two
// D lanes into tmp[0], move to a GPR, then add src1.
instruct reduce_add2L(iRegLNoSp dst, iRegL src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVL src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "addpd $tmp, $src2\n\t"
            "umov  $dst, $tmp, D, 0\n\t"
            "add   $dst, $src1, $dst\t add reduction2L"
  %}
  ins_encode %{
    // tmp[0] = src2[0] + src2[1] (scalar pairwise add, D arrangement)
    __ addpd(as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ D, 0);
    __ add($dst$$Register, $src1$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply reduction of 2 longs plus a scalar input. There is no 64-bit
// lane vector multiply here, so both lanes are moved to a GPR and
// multiplied with scalar mul.
instruct reduce_mul2L(iRegLNoSp dst, iRegL src1, vecX src2, iRegLNoSp tmp)
%{
  match(Set dst (MulReductionVL src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov  $tmp, $src2, D, 0\n\t"
            "mul   $dst, $src1, $tmp\n\t"
            "umov  $tmp, $src2, D, 1\n\t"
            "mul   $dst, $dst, $tmp\t mul reduction2L"
  %}
  ins_encode %{
    // dst = src1 * src2[0]
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ mul($dst$$Register, $src1$$Register, $tmp$$Register);
    // dst *= src2[1]
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ mul($dst$$Register, $dst$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
17637 
// Add reduction of 2 doubles plus a scalar input; scalar faddd lane by
// lane (same structure as reduce_add2F but with D lanes).
instruct reduce_add2D(vRegD dst, vRegD dsrc, vecX vsrc, vecX tmp)
%{
  match(Set dst (AddReductionVD dsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $dsrc, $vsrc\n\t"
            "ins   $tmp, D, $vsrc, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t# add reduction2D"
  %}
  ins_encode %{
    // dst = dsrc + vsrc[0]
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dsrc$$reg), as_FloatRegister($vsrc$$reg));
    // dst += vsrc[1]
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply reduction of 2 doubles plus a scalar input; scalar fmuld
// lane by lane.
instruct reduce_mul2D(vRegD dst, vRegD dsrc, vecX vsrc, vecX tmp)
%{
  match(Set dst (MulReductionVD dsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $dsrc, $vsrc\n\t"
            "ins   $tmp, D, $vsrc, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t# mul reduction2D"
  %}
  ins_encode %{
    // dst = dsrc * vsrc[0]
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dsrc$$reg), as_FloatRegister($vsrc$$reg));
    // dst *= vsrc[1]
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
17677 
// Max reduction of 8 bytes plus a scalar input: across-lanes smaxv,
// sign-extending smov to a GPR, then csel against src1 (GT keeps the
// vector max, otherwise src1).
instruct reduce_max8B(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecD tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "smaxv $tmp, T8B, $src2\n\t"
            "smov  $dst, $tmp, B, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 gt\t max reduction8B" %}
  ins_encode %{
    __ smaxv(as_FloatRegister($tmp$$reg), __ T8B, as_FloatRegister($src2$$reg));
    // smov sign-extends the byte lane into the 32-bit register
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::GT);
  %}
  ins_pipe(pipe_class_default);
%}

// Max reduction of 16 bytes plus a scalar input (T16B variant of the
// pattern above).
instruct reduce_max16B(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "smaxv $tmp, T16B, $src2\n\t"
            "smov  $dst, $tmp, B, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 gt\t max reduction16B" %}
  ins_encode %{
    __ smaxv(as_FloatRegister($tmp$$reg), __ T16B, as_FloatRegister($src2$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::GT);
  %}
  ins_pipe(pipe_class_default);
%}

// Max reduction of 4 shorts plus a scalar input (T4H lanes, H element
// extracted with sign extension).
instruct reduce_max4S(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecD tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "smaxv $tmp, T4H, $src2\n\t"
            "smov  $dst, $tmp, H, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 gt\t max reduction4S" %}
  ins_encode %{
    __ smaxv(as_FloatRegister($tmp$$reg), __ T4H, as_FloatRegister($src2$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ H, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::GT);
  %}
  ins_pipe(pipe_class_default);
%}
17731 
// Max reduction of 8 shorts plus a scalar input (T8H variant).
// NOTE: the format string previously printed "smaxv  " with a double
// space, unlike every sibling max/min reduction; normalized here.
instruct reduce_max8S(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "smaxv $tmp, T8H, $src2\n\t"
            "smov  $dst, $tmp, H, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 gt\t max reduction8S" %}
  ins_encode %{
    __ smaxv(as_FloatRegister($tmp$$reg), __ T8H, as_FloatRegister($src2$$reg));
    // smov sign-extends the H lane into the 32-bit register
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ H, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::GT);
  %}
  ins_pipe(pipe_class_default);
%}
17749 
// Max reduction of 2 ints plus a scalar input. SMAXV has no 2S
// arrangement, so the 64-bit src2 is duplicated into both halves of a
// 128-bit tmp (dup T2D) and reduced as T4S — the max over the 4
// identical-pair lanes equals the max over the original 2 lanes.
instruct reduce_max2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecX tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "dup   $tmp, T2D, $src2\n\t"
            "smaxv $tmp, T4S, $tmp\n\t"
            "umov  $dst, $tmp, S, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 gt\t max reduction2I" %}
  ins_encode %{
    __ dup(as_FloatRegister($tmp$$reg), __ T2D, as_FloatRegister($src2$$reg));
    __ smaxv(as_FloatRegister($tmp$$reg), __ T4S, as_FloatRegister($tmp$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::GT);
  %}
  ins_pipe(pipe_class_default);
%}

// Max reduction of 4 ints plus a scalar input (direct smaxv T4S).
instruct reduce_max4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "smaxv $tmp, T4S, $src2\n\t"
            "umov  $dst, $tmp, S, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 gt\t max reduction4I" %}
  ins_encode %{
    __ smaxv(as_FloatRegister($tmp$$reg), __ T4S, as_FloatRegister($src2$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::GT);
  %}
  ins_pipe(pipe_class_default);
%}

// Max reduction of 2 longs plus a scalar input: no across-lanes max for
// D elements, so both lanes are moved to a GPR and folded in with
// cmp/csel (64-bit compares).
instruct reduce_max2L(iRegLNoSp dst, iRegL src1, vecX src2, iRegLNoSp tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "umov  $tmp, $src2, D, 0\n\t"
            "cmp   $src1,$tmp\n\t"
            "csel  $dst, $src1, $tmp gt\n\t"
            "umov  $tmp, $src2, D, 1\n\t"
            "cmp   $dst, $tmp\n\t"
            "csel  $dst, $dst, $tmp gt\t max reduction2L" %}
  ins_encode %{
    // dst = max(src1, src2[0])
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ cmp(as_Register($src1$$reg), as_Register($tmp$$reg));
    __ csel(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($tmp$$reg), Assembler::GT);
    // dst = max(dst, src2[1])
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ cmp(as_Register($dst$$reg), as_Register($tmp$$reg));
    __ csel(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($tmp$$reg), Assembler::GT);
  %}
  ins_pipe(pipe_class_default);
%}
17809 
// Max reduction of 2 floats plus a scalar input; lane-by-lane scalar
// fmaxs (fmaxs also handles NaN/-0.0 per IEEE maxNum semantics —
// NOTE(review): relied on for Java Math.max conformance, confirm against
// the assembler definition).
instruct reduce_max2F(vRegF dst, vRegF fsrc, vecD vsrc, vecD tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxReductionV fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmaxs $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fmaxs $dst, $dst, $tmp\t# max reduction2F" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S, as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Max reduction of 4 floats plus a scalar input: across-lanes fmaxv
// then a final scalar fmaxs against fsrc. No tmp needed.
instruct reduce_max4F(vRegF dst, vRegF fsrc, vecX vsrc) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxReductionV fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "fmaxv $dst, T4S, $vsrc\n\t"
            "fmaxs $dst, $dst, $fsrc\t# max reduction4F" %}
  ins_encode %{
    __ fmaxv(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($vsrc$$reg));
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($fsrc$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Max reduction of 2 doubles plus a scalar input; lane-by-lane scalar
// fmaxd (fmaxv has no D arrangement for 128-bit vectors here).
instruct reduce_max2D(vRegD dst, vRegD dsrc, vecX vsrc, vecX tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MaxReductionV dsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmaxd $dst, $dsrc, $vsrc\n\t"
            "ins   $tmp, D, $vsrc, 0, 1\n\t"
            "fmaxd $dst, $dst, $tmp\t# max reduction2D" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg), as_FloatRegister($dsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D, as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmaxd(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
17855 
// Min reduction of 8 bytes plus a scalar input: mirror of reduce_max8B
// with sminv and csel LT.
instruct reduce_min8B(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecD tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "sminv $tmp, T8B, $src2\n\t"
            "smov  $dst, $tmp, B, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 lt\t min reduction8B" %}
  ins_encode %{
    __ sminv(as_FloatRegister($tmp$$reg), __ T8B, as_FloatRegister($src2$$reg));
    // smov sign-extends the byte lane into the 32-bit register
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}

// Min reduction of 16 bytes plus a scalar input (T16B variant).
instruct reduce_min16B(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "sminv $tmp, T16B, $src2\n\t"
            "smov  $dst, $tmp, B, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 lt\t min reduction16B" %}
  ins_encode %{
    __ sminv(as_FloatRegister($tmp$$reg), __ T16B, as_FloatRegister($src2$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}

// Min reduction of 4 shorts plus a scalar input (T4H lanes).
instruct reduce_min4S(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecD tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "sminv $tmp, T4H, $src2\n\t"
            "smov  $dst, $tmp, H, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 lt\t min reduction4S" %}
  ins_encode %{
    __ sminv(as_FloatRegister($tmp$$reg), __ T4H, as_FloatRegister($src2$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ H, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}

// Min reduction of 8 shorts plus a scalar input (T8H variant).
instruct reduce_min8S(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "sminv $tmp, T8H, $src2\n\t"
            "smov  $dst, $tmp, H, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 lt\t min reduction8S" %}
  ins_encode %{
    __ sminv(as_FloatRegister($tmp$$reg), __ T8H, as_FloatRegister($src2$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ H, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
17927 
// Min reduction of 2 ints plus a scalar input. SMINV has no 2S
// arrangement, so the 64-bit src2 is duplicated into both halves of a
// 128-bit tmp (dup T2D) and reduced as T4S — the min over the 4
// identical-pair lanes equals the min over the original 2 lanes.
// FIX: the format string previously printed "sminv $tmp, T2S" while the
// encoding emits a T4S sminv; format corrected to T4S to match the
// generated code (and the sibling reduce_max2I).
instruct reduce_min2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecX tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "dup   $tmp, T2D, $src2\n\t"
            "sminv $tmp, T4S, $tmp\n\t"
            "umov  $dst, $tmp, S, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 lt\t min reduction2I" %}
  ins_encode %{
    __ dup(as_FloatRegister($tmp$$reg), __ T2D, as_FloatRegister($src2$$reg));
    __ sminv(as_FloatRegister($tmp$$reg), __ T4S, as_FloatRegister($tmp$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
17947 
// Min reduction of 4 ints plus a scalar input (direct sminv T4S).
instruct reduce_min4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "sminv $tmp, T4S, $src2\n\t"
            "umov  $dst, $tmp, S, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 lt\t min reduction4I" %}
  ins_encode %{
    __ sminv(as_FloatRegister($tmp$$reg), __ T4S, as_FloatRegister($src2$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}

// Min reduction of 2 longs plus a scalar input: no across-lanes min for
// D elements, so both lanes go through a GPR with cmp/csel (64-bit).
instruct reduce_min2L(iRegLNoSp dst, iRegL src1, vecX src2, iRegLNoSp tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "umov  $tmp, $src2, D, 0\n\t"
            "cmp   $src1,$tmp\n\t"
            "csel  $dst, $src1, $tmp lt\n\t"
            "umov  $tmp, $src2, D, 1\n\t"
            "cmp   $dst, $tmp\n\t"
            "csel  $dst, $dst, $tmp lt\t min reduction2L" %}
  ins_encode %{
    // dst = min(src1, src2[0])
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ cmp(as_Register($src1$$reg), as_Register($tmp$$reg));
    __ csel(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($tmp$$reg), Assembler::LT);
    // dst = min(dst, src2[1])
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ cmp(as_Register($dst$$reg), as_Register($tmp$$reg));
    __ csel(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($tmp$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
17987 
// Min reduction of 2 floats plus a scalar input; lane-by-lane scalar
// fmins (mirror of reduce_max2F).
instruct reduce_min2F(vRegF dst, vRegF fsrc, vecD vsrc, vecD tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinReductionV fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmins $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fmins $dst, $dst, $tmp\t# min reduction2F" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S, as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Min reduction of 4 floats plus a scalar input: across-lanes fminv
// then a final scalar fmins against fsrc. No tmp needed.
instruct reduce_min4F(vRegF dst, vRegF fsrc, vecX vsrc) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinReductionV fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "fminv $dst, T4S, $vsrc\n\t"
            "fmins $dst, $dst, $fsrc\t# min reduction4F" %}
  ins_encode %{
    __ fminv(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($vsrc$$reg));
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($fsrc$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Min reduction of 2 doubles plus a scalar input; lane-by-lane scalar
// fmind.
instruct reduce_min2D(vRegD dst, vRegD dsrc, vecX vsrc, vecX tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MinReductionV dsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmind $dst, $dsrc, $vsrc\n\t"
            "ins   $tmp, D, $vsrc, 0, 1\n\t"
            "fmind $dst, $dst, $tmp\t# min reduction2D" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg), as_FloatRegister($dsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D, as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmind(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
18033 
18034 // ====================VECTOR ARITHMETIC=======================================
18035 
18036 // --------------------------------- ADD --------------------------------------
18037 
// Vector add, byte lanes, 64-bit vector. The predicate also accepts
// length 4: a 4B vector is held in a vecD register and the unused high
// lanes are simply ignored.
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector add, 16 byte lanes, 128-bit vector.
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector add, short lanes, 64-bit vector (length 2 uses the 4H form
// with the high lanes ignored).
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector add, 8 short lanes, 128-bit vector.
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector add, 2 int lanes, 64-bit vector.
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector add, 4 int lanes, 128-bit vector.
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector add, 2 long lanes, 128-bit vector (T2D arrangement).
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector FP add, 2 float lanes, 64-bit vector.
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// Vector FP add, 4 float lanes, 128-bit vector.
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

// Vector FP add, 2 double lanes, 128-bit vector. No predicate: 2D is
// the only vector shape AddVD can take here.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
18178 
18179 // --------------------------------- SUB --------------------------------------
18180 
// Vector subtract, byte lanes, 64-bit vector (length 4 also matches;
// unused high lanes ignored — same scheme as vadd8B).
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector subtract, 16 byte lanes, 128-bit vector.
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector subtract, short lanes, 64-bit vector (length 2 or 4).
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector subtract, 8 short lanes, 128-bit vector.
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector subtract, 2 int lanes, 64-bit vector.
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector subtract, 4 int lanes, 128-bit vector.
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
18266 
18267 instruct vsub2L(vecX dst, vecX src1, vecX src2)
18268 %{
18269   predicate(n->as_Vector()->length() == 2);
18270   match(Set dst (SubVL src1 src2));
18271   ins_cost(INSN_COST);
18272   format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
18273   ins_encode %{
18274     __ subv(as_FloatRegister($dst$$reg), __ T2D,
18275             as_FloatRegister($src1$$reg),
18276             as_FloatRegister($src2$$reg));
18277   %}
18278   ins_pipe(vdop128);
18279 %}
18280 
18281 instruct vsub2F(vecD dst, vecD src1, vecD src2)
18282 %{
18283   predicate(n->as_Vector()->length() == 2);
18284   match(Set dst (SubVF src1 src2));
18285   ins_cost(INSN_COST);
18286   format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
18287   ins_encode %{
18288     __ fsub(as_FloatRegister($dst$$reg), __ T2S,
18289             as_FloatRegister($src1$$reg),
18290             as_FloatRegister($src2$$reg));
18291   %}
18292   ins_pipe(vdop_fp64);
18293 %}
18294 
18295 instruct vsub4F(vecX dst, vecX src1, vecX src2)
18296 %{
18297   predicate(n->as_Vector()->length() == 4);
18298   match(Set dst (SubVF src1 src2));
18299   ins_cost(INSN_COST);
18300   format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
18301   ins_encode %{
18302     __ fsub(as_FloatRegister($dst$$reg), __ T4S,
18303             as_FloatRegister($src1$$reg),
18304             as_FloatRegister($src2$$reg));
18305   %}
18306   ins_pipe(vdop_fp128);
18307 %}
18308 
18309 instruct vsub2D(vecX dst, vecX src1, vecX src2)
18310 %{
18311   predicate(n->as_Vector()->length() == 2);
18312   match(Set dst (SubVD src1 src2));
18313   ins_cost(INSN_COST);
18314   format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
18315   ins_encode %{
18316     __ fsub(as_FloatRegister($dst$$reg), __ T2D,
18317             as_FloatRegister($src1$$reg),
18318             as_FloatRegister($src2$$reg));
18319   %}
18320   ins_pipe(vdop_fp128);
18321 %}
18322 
18323 // --------------------------------- MUL --------------------------------------
18324 
// Integer vector multiply: NEON mulv for byte/short/int lanes.
// Uses the vmul64/vmul128 pipeline classes (multiply latency differs
// from the simple ALU vdop classes used by add/sub).

// 8B (also used for 4-byte vectors in the low half of a D register).
instruct vmul8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (MulVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

instruct vmul16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (MulVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// 4H (also used for 2-short vectors in the low half of a D register).
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}
18410 
// MulVL (2 x 64-bit lanes): the sequence below extracts each 64-bit lane
// to a general-purpose register with umov, multiplies in the integer
// pipeline, and inserts the product back into the destination lane with
// mov. Two scratch GPRs are required (TEMP tmp1, tmp2); ins_pipe is
// pipe_slow since this is a multi-instruction cross-file sequence.
instruct vmul2L(vecX dst, vecX src1, vecX src2, iRegLNoSp tmp1, iRegLNoSp tmp2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVL src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp1, TEMP tmp2);
  format %{ "umov   $tmp1, $src1, D, 0\n\t"
            "umov   $tmp2, $src2, D, 0\n\t"
            "mul    $tmp2, $tmp2, $tmp1\n\t"
            "mov    $dst,  T2D,   0, $tmp2\t# insert into vector(2L)\n\t"
            "umov   $tmp1, $src1, D, 1\n\t"
            "umov   $tmp2, $src2, D, 1\n\t"
            "mul    $tmp2, $tmp2, $tmp1\n\t"
            "mov    $dst,  T2D,   1, $tmp2\t# insert into vector(2L)\n\t"
  %}
  ins_encode %{
    // Lane 0: extract both operands, multiply in GPRs, insert product.
    __ umov($tmp1$$Register, as_FloatRegister($src1$$reg), __ D, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ mul(as_Register($tmp2$$reg), as_Register($tmp2$$reg), as_Register($tmp1$$reg));
    __ mov(as_FloatRegister($dst$$reg), __ T2D, 0, $tmp2$$Register);
    // Lane 1: same sequence for the upper 64-bit element.
    __ umov($tmp1$$Register, as_FloatRegister($src1$$reg), __ D, 1);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ mul(as_Register($tmp2$$reg), as_Register($tmp2$$reg), as_Register($tmp1$$reg));
    __ mov(as_FloatRegister($dst$$reg), __ T2D, 1, $tmp2$$Register);
  %}
  ins_pipe(pipe_slow);
%}
18438 
// Floating-point vector multiply: NEON fmul, FP multiply/divide pipelines.
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
18480 
18481 // --------------------------------- MLA --------------------------------------
18482 
// Integer multiply-accumulate: matches (AddV dst (MulV src1 src2)) and
// fuses it into a single NEON mlav (dst += src1 * src2). dst is both
// read and written, which the match rule expresses by using dst as the
// addend.
instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
18539 
18540 // dst + src1 * src2
18541 instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
18542   predicate(UseFMA && n->as_Vector()->length() == 2);
18543   match(Set dst (FmaVF  dst (Binary src1 src2)));
18544   format %{ "fmla  $dst,$src1,$src2\t# vector (2S)" %}
18545   ins_cost(INSN_COST);
18546   ins_encode %{
18547     __ fmla(as_FloatRegister($dst$$reg), __ T2S,
18548             as_FloatRegister($src1$$reg),
18549             as_FloatRegister($src2$$reg));
18550   %}
18551   ins_pipe(vmuldiv_fp64);
18552 %}
18553 
18554 // dst + src1 * src2
18555 instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
18556   predicate(UseFMA && n->as_Vector()->length() == 4);
18557   match(Set dst (FmaVF  dst (Binary src1 src2)));
18558   format %{ "fmla  $dst,$src1,$src2\t# vector (4S)" %}
18559   ins_cost(INSN_COST);
18560   ins_encode %{
18561     __ fmla(as_FloatRegister($dst$$reg), __ T4S,
18562             as_FloatRegister($src1$$reg),
18563             as_FloatRegister($src2$$reg));
18564   %}
18565   ins_pipe(vmuldiv_fp128);
18566 %}
18567 
18568 // dst + src1 * src2
18569 instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
18570   predicate(UseFMA && n->as_Vector()->length() == 2);
18571   match(Set dst (FmaVD  dst (Binary src1 src2)));
18572   format %{ "fmla  $dst,$src1,$src2\t# vector (2D)" %}
18573   ins_cost(INSN_COST);
18574   ins_encode %{
18575     __ fmla(as_FloatRegister($dst$$reg), __ T2D,
18576             as_FloatRegister($src1$$reg),
18577             as_FloatRegister($src2$$reg));
18578   %}
18579   ins_pipe(vmuldiv_fp128);
18580 %}
18581 
18582 // --------------------------------- MLS --------------------------------------
18583 
// Integer multiply-subtract: matches (SubV dst (MulV src1 src2)) and
// fuses it into NEON mlsv (dst -= src1 * src2).
instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
18640 
18641 // dst - src1 * src2
// Floating-point fused multiply-subtract (dst - src1 * src2): matches an
// FMA whose multiplicand is negated on either side (NegVF/NegVD on src1
// or src2 — both match rules are listed) and emits NEON fmls.

// dst - src1 * src2
instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst - src1 * src2
instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst - src1 * src2
instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary (NegVD src1) src2)));
  match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
18685 
18686 // --------------- Vector Multiply-Add Shorts into Integer --------------------
18687 
// MulAddVS2VI: multiply adjacent pairs of shorts and add them into ints.
// Implemented as two widening signed multiplies (smullv over the low 4H
// lanes into tmp, and over the 8H form into dst — presumably covering the
// upper halves; confirm against the assembler's smullv definition),
// followed by a pairwise add (addpv, 4S) that sums adjacent products.
// dst must not alias the sources before the final add (TEMP_DEF dst),
// and tmp is a scratch vector register.
instruct vmuladdS2I(vecX dst, vecX src1, vecX src2, vecX tmp) %{
  predicate(n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MulAddVS2VI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "smullv  $tmp, $src1, $src2\t# vector (4H)\n\t"
            "smullv  $dst, $src1, $src2\t# vector (8H)\n\t"
            "addpv   $dst, $tmp, $dst\t# vector (4S)\n\t" %}
  ins_encode %{
    __ smullv(as_FloatRegister($tmp$$reg), __ T4H,
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
    __ smullv(as_FloatRegister($dst$$reg), __ T8H,
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
    __ addpv(as_FloatRegister($dst$$reg), __ T4S,
             as_FloatRegister($tmp$$reg),
             as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
18709 
18710 // --------------------------------- DIV --------------------------------------
18711 
// Floating-point vector divide: NEON fdiv (there is no integer vector
// divide on AArch64 NEON, so only VF/VD forms exist).
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
18753 
18754 // --------------------------------- SQRT -------------------------------------
18755 
// Vector square root: NEON fsqrt for SqrtVF/SqrtVD.
instruct vsqrt2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVF src));
  format %{ "fsqrt  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg));
  %}
  // NOTE(review): this uses vunop_fp64 while the 128-bit forms below use
  // vsqrt_fp128 — confirm the intended pipeline class for the 64-bit sqrt.
  ins_pipe(vunop_fp64);
%}

instruct vsqrt4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SqrtVF src));
  format %{ "fsqrt  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}

instruct vsqrt2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVD src));
  format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}
18789 
18790 // --------------------------------- ABS --------------------------------------
18791 
// Vector absolute value: integer lanes use NEON abs (emitted via absr),
// floating-point lanes use fabs.
instruct vabs8B(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AbsVB src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst,$src\t# vector (8B)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical64);
%}

instruct vabs16B(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AbsVB src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst,$src\t# vector (16B)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

instruct vabs4S(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVS src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst,$src\t# vector (4H)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical64);
%}

instruct vabs8S(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AbsVS src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst,$src\t# vector (8H)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

instruct vabs2I(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVI src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical64);
%}

instruct vabs4I(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVI src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

instruct vabs2L(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVL src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

// Floating-point variants carry a higher declared cost (INSN_COST * 3).
instruct vabs2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

instruct vabs4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

instruct vabs2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
18911 
18912 // --------------------------------- NEG --------------------------------------
18913 
// Floating-point vector negate: NEON fneg for NegVF/NegVD.
instruct vneg2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

instruct vneg4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

instruct vneg2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
18952 
18953 // --------------------------------- NOT --------------------------------------
18954 
// Vector bitwise NOT: the ideal graph expresses NOT as an XOR with a
// replicated all-ones constant (immI_M1 / immL_M1), which these rules
// recognize and collapse into a single NEON not (notr). The predicate is
// on length_in_bytes since the element type is irrelevant for a bitwise op.
instruct vnot2I(vecD dst, vecD src, immI_M1 m1)
%{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src (ReplicateB m1)));
  match(Set dst (XorV src (ReplicateS m1)));
  match(Set dst (XorV src (ReplicateI m1)));
  ins_cost(INSN_COST);
  format %{ "not  $dst,$src\t# vector (8B)" %}
  ins_encode %{
    __ notr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical64);
%}

instruct vnot4I(vecX dst, vecX src, immI_M1 m1)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src (ReplicateB m1)));
  match(Set dst (XorV src (ReplicateS m1)));
  match(Set dst (XorV src (ReplicateI m1)));
  ins_cost(INSN_COST);
  format %{ "not  $dst,$src\t# vector (16B)" %}
  ins_encode %{
    __ notr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

// Long-element form: XorV with a replicated long -1.
instruct vnot2L(vecX dst, vecX src, immL_M1 m1)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src (ReplicateL m1)));
  ins_cost(INSN_COST);
  format %{ "not  $dst,$src\t# vector (16B)" %}
  ins_encode %{
    __ notr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}
18997 
18998 // --------------------------------- AND --------------------------------------
18999 
// Vector bitwise AND: NEON and (emitted via andr), element-type agnostic
// so the predicate keys on length_in_bytes.
instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
19028 
19029 // --------------------------------- OR ---------------------------------------
19030 
// Vector bitwise OR (64-bit form): NEON orr for OrV on 4- or 8-byte vectors.
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  // Fix: format previously printed "and" (copy-paste from vand8B) although
  // the emitted instruction is orr; align the debug format with vor16B.
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
19045 
// Vector bitwise OR (128-bit form): NEON orr over 16 bytes.
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
19059 
19060 // --------------------------------- XOR --------------------------------------
19061 
// Vector bitwise XOR: NEON eor, element-type agnostic (predicate on
// length_in_bytes).
instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
19090 
// RoundDoubleModeV (2 doubles): dispatch on the constant rounding mode to
// the matching NEON round-to-integral instruction:
//   rmode_rint  -> frintn (round to nearest, ties to even)
//   rmode_floor -> frintm (round toward minus infinity)
//   rmode_ceil  -> frintp (round toward plus infinity)
instruct vround2D_reg(vecX dst, vecX src, immI rmode) %{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (RoundDoubleModeV src rmode));
  format %{ "frint  $dst, $src, $rmode" %}
  ins_encode %{
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        __ frintn(as_FloatRegister($dst$$reg), __ T2D,
                  as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_floor:
        __ frintm(as_FloatRegister($dst$$reg), __ T2D,
                  as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_ceil:
        __ frintp(as_FloatRegister($dst$$reg), __ T2D,
                  as_FloatRegister($src$$reg));
        break;
      default:
        // Guard against an unexpected mode constant instead of silently
        // emitting nothing (previously the switch had no default).
        ShouldNotReachHere();
        break;
    }
  %}
  ins_pipe(vdop_fp128);
%}
19113 
19114 // ------------------------------ Max ---------------------------------------
19115 
// Signed max of 8 byte lanes (64-bit vector) via NEON SMAX.
instruct vmax8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "maxv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ maxv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
19129 
// Signed max of 16 byte lanes (128-bit vector) via NEON SMAX.
instruct vmax16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "maxv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ maxv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
19143 
// Signed max of 4 short lanes (64-bit vector, 4H arrangement) via NEON SMAX.
instruct vmax4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "maxv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ maxv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
19157 
// Signed max of 8 short lanes (128-bit vector, 8H arrangement) via NEON SMAX.
instruct vmax8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "maxv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ maxv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
19171 
// Signed max of 2 int lanes (64-bit vector, 2S arrangement) via NEON SMAX.
instruct vmax2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "maxv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ maxv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
19185 
// Signed max of 4 int lanes (128-bit vector, 4S arrangement) via NEON SMAX.
instruct vmax4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "maxv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ maxv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
19199 
// Signed max of 2 long lanes.  AArch64 NEON has no 2D integer SMAX, so
// synthesize it: CMGT builds a per-lane all-ones mask where src1 > src2,
// then BSL selects src1 where the mask is set and src2 elsewhere.
// dst is TEMP because it holds the mask while both sources are still live.
instruct vmax2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP dst);
  // "\n\t" separates the two emitted instructions in debug output,
  // matching the other multi-instruction formats in this file.
  format %{ "cmgt  $dst,$src1,$src2\t# vector (2D)\n\t"
            "bsl  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
    __ bsl(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src1$$reg),
           as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
19218 
// Max of 2 float lanes (64-bit vector) via NEON FMAX (IEEE NaN semantics).
instruct vmax2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (2F)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
19232 
// Max of 4 float lanes (128-bit vector) via NEON FMAX.
instruct vmax4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
19246 
// Max of 2 double lanes (128-bit vector) via NEON FMAX.
instruct vmax2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
19260 
19261 // ------------------------------ Min ---------------------------------------
19262 
// Signed min of 8 byte lanes (64-bit vector) via NEON SMIN.
instruct vmin8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "minv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ minv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
19276 
// Signed min of 16 byte lanes (128-bit vector) via NEON SMIN.
instruct vmin16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "minv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ minv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
19290 
// Signed min of 4 short lanes (64-bit vector, 4H arrangement) via NEON SMIN.
instruct vmin4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "minv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ minv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
19304 
// Signed min of 8 short lanes (128-bit vector, 8H arrangement) via NEON SMIN.
instruct vmin8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "minv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ minv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
19318 
// Signed min of 2 int lanes (64-bit vector, 2S arrangement) via NEON SMIN.
instruct vmin2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "minv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ minv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
19332 
// Signed min of 4 int lanes (128-bit vector, 4S arrangement) via NEON SMIN.
instruct vmin4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "minv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ minv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
19346 
// Signed min of 2 long lanes.  AArch64 NEON has no 2D integer SMIN, so
// synthesize it: CMGT builds a per-lane all-ones mask where src1 > src2,
// then BSL (with swapped sources relative to vmax2L) selects src2 where
// src1 > src2 and src1 elsewhere.  dst is TEMP because it holds the mask
// while both sources are still live.
instruct vmin2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP dst);
  // "\n\t" separates the two emitted instructions in debug output,
  // matching the other multi-instruction formats in this file.
  format %{ "cmgt  $dst,$src1,$src2\t# vector (2D)\n\t"
            "bsl  $dst,$src2,$src1\t# vector (16B)" %}
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
    __ bsl(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src2$$reg),
           as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}
19365 
// Min of 2 float lanes (64-bit vector) via NEON FMIN (IEEE NaN semantics).
instruct vmin2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (2F)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
19379 
// Min of 4 float lanes (128-bit vector) via NEON FMIN.
instruct vmin4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
19393 
// Min of 2 double lanes (128-bit vector) via NEON FMIN.
instruct vmin2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
19407 
19408 // ------------------------------ Comparison ---------------------------------
19409 
// Vector mask compare EQ, 8 byte lanes: lane = all-ones if src1 == src2,
// else all-zeros (NEON CMEQ).
instruct vcmeq8B(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmeq  $dst,$src1,$src2\t# vector cmp (8B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
19424 
// Vector mask compare EQ, 16 byte lanes (NEON CMEQ).
instruct vcmeq16B(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 16 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmeq  $dst,$src1,$src2\t# vector cmp (16B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
19439 
// Vector mask compare EQ, 4 short lanes (NEON CMEQ, 4H arrangement).
instruct vcmeq4S(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmeq  $dst,$src1,$src2\t# vector cmp (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
19454 
// Vector mask compare EQ, 8 short lanes (NEON CMEQ, 8H arrangement).
instruct vcmeq8S(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmeq  $dst,$src1,$src2\t# vector cmp (8S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
19469 
// Vector mask compare EQ, 2 int lanes (NEON CMEQ, 2S arrangement).
instruct vcmeq2I(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmeq  $dst,$src1,$src2\t# vector cmp (2I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
19484 
// Vector mask compare EQ, 4 int lanes (NEON CMEQ, 4S arrangement).
instruct vcmeq4I(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmeq  $dst,$src1,$src2\t# vector cmp (4I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
19499 
// Vector mask compare EQ, 2 long lanes (NEON CMEQ, 2D arrangement).
instruct vcmeq2L(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmeq  $dst,$src1,$src2\t# vector cmp (2L)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
19514 
// Vector mask compare EQ, 2 float lanes (NEON FCMEQ).
instruct vcmeq2F(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmeq  $dst,$src1,$src2\t# vector cmp (2F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmeq(as_FloatRegister($dst$$reg), __ T2S,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
19529 
// Vector mask compare EQ, 4 float lanes (NEON FCMEQ).
instruct vcmeq4F(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmeq  $dst,$src1,$src2\t# vector cmp (4F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmeq(as_FloatRegister($dst$$reg), __ T4S,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
19544 
// Vector mask compare EQ, 2 double lanes (NEON FCMEQ).
instruct vcmeq2D(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmeq  $dst,$src1,$src2\t# vector cmp (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmeq(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
19559 
// Vector mask compare NE, 8 byte lanes: no NEON CMNE exists, so emit
// CMEQ and invert the mask with NOT.
instruct vcmne8B(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmeq  $dst,$src1,$src2\n\t# vector cmp (8B)"
            "not   $dst,$dst\t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop64);
%}
19576 
// Vector mask compare NE, 16 byte lanes: CMEQ then NOT to invert the mask.
instruct vcmne16B(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 16 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmeq  $dst,$src1,$src2\n\t# vector cmp (16B)"
            "not   $dst,$dst\t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}
19593 
// Vector mask compare NE, 4 short lanes: CMEQ (4H) then NOT to invert the
// mask.  The NOT uses the 64-bit T8B arrangement, consistent with the other
// vecD NE variants (8B, 2I, 2F); NOT is lane-independent so the arrangement
// only determines how much of the register is written.
instruct vcmne4S(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmeq  $dst,$src1,$src2\n\t# vector cmp (4S)"
            "not   $dst,$dst\t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    // Was T16B: a 128-bit NOT on a 64-bit (vecD) operand needlessly touched
    // the upper half of the register; T8B matches the operand width.
    __ notr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop64);
%}
19610 
// Vector mask compare NE, 8 short lanes: CMEQ (8H) then NOT to invert the mask.
instruct vcmne8S(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmeq  $dst,$src1,$src2\n\t# vector cmp (8S)"
            "not   $dst,$dst\t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}
19627 
// Vector mask compare NE, 2 int lanes: CMEQ (2S) then NOT to invert the mask.
instruct vcmne2I(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmeq  $dst,$src1,$src2\n\t# vector cmp (2I)"
            "not   $dst,$dst\t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop64);
%}
19644 
// Vector mask compare NE, 4 int lanes: CMEQ (4S) then NOT to invert the mask.
instruct vcmne4I(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmeq  $dst,$src1,$src2\n\t# vector cmp (4I)"
            "not   $dst,$dst\t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}
19661 
// Vector mask compare NE, 2 long lanes: CMEQ (2D) then NOT to invert the mask.
instruct vcmne2L(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmeq  $dst,$src1,$src2\n\t# vector cmp (2L)"
            "not   $dst,$dst\t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}
19678 
// Vector mask compare NE, 2 float lanes: FCMEQ then NOT to invert the mask.
// NOTE(review): NaN != NaN yields a set mask here (FCMEQ clears, NOT sets),
// which is the desired unordered-NE semantics.
instruct vcmne2F(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmeq  $dst,$src1,$src2\n\t# vector cmp (2F)"
            "not    $dst,$dst\t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmeq(as_FloatRegister($dst$$reg), __ T2S,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
19695 
// Vector mask compare NE, 4 float lanes: FCMEQ then NOT to invert the mask.
instruct vcmne4F(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmeq  $dst,$src1,$src2\n\t# vector cmp (4F)"
            "not    $dst,$dst\t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmeq(as_FloatRegister($dst$$reg), __ T4S,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
19712 
// Vector mask compare NE, 2 double lanes: FCMEQ then NOT to invert the mask.
instruct vcmne2D(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmeq  $dst,$src1,$src2\n\t# vector cmp (2D)"
            "not    $dst,$dst\t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmeq(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
19729 
// Vector mask compare LT, 8 byte lanes: no CMLT register form, so emit
// CMGT with the operands swapped (src1 < src2  <=>  src2 > src1).
instruct vcmlt8B(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmgt  $dst,$src2,$src1\t# vector cmp (8B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop64);
%}
19744 
// Vector mask compare LT, 16 byte lanes: CMGT with operands swapped.
instruct vcmlt16B(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 16 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmgt  $dst,$src2,$src1\t# vector cmp (16B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}
19759 
// Vector mask compare LT, 4 short lanes: CMGT (4H) with operands swapped.
instruct vcmlt4S(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmgt  $dst,$src2,$src1\t# vector cmp (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop64);
%}
19774 
// Vector mask compare LT, 8 short lanes: CMGT (8H) with operands swapped.
instruct vcmlt8S(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmgt  $dst,$src2,$src1\t# vector cmp (8S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}
19789 
// Vector mask compare LT, 2 int lanes: CMGT (2S) with operands swapped.
instruct vcmlt2I(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmgt  $dst,$src2,$src1\t# vector cmp (2I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop64);
%}
19804 
// Vector mask compare LT, 4 int lanes: CMGT (4S) with operands swapped.
instruct vcmlt4I(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmgt  $dst,$src2,$src1\t# vector cmp (4I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}
19819 
// Vector mask compare LT, 2 long lanes: CMGT (2D) with operands swapped.
instruct vcmlt2L(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmgt  $dst,$src2,$src1\t# vector cmp (2L)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}
19834 
// Vector mask compare LT, 2 float lanes: FCMGT with operands swapped
// (src1 < src2  <=>  src2 > src1; unordered compares yield a clear mask).
instruct vcmlt2F(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmgt  $dst,$src2,$src1\t# vector cmp (2F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmgt(as_FloatRegister($dst$$reg), __ T2S,
             as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
19849 
// Vector mask compare LT, 4 float lanes: FCMGT with operands swapped.
instruct vcmlt4F(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmgt  $dst,$src2,$src1\t# vector cmp (4F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmgt(as_FloatRegister($dst$$reg), __ T4S,
             as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
19864 
19865 instruct vcmlt2D(vecX dst, vecX src1, vecX src2, immI cond)
19866 %{
19867   predicate(n->as_Vector()->length() == 2 &&
19868             n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
19869             n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
19870   match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
19871   format %{ "fcmgt  $dst,$src2,$src1\t# vector cmp (2D)" %}
19872   ins_cost(INSN_COST);
19873   ins_encode %{
19874     __ fcmgt(as_FloatRegister($dst$$reg), __ T2D,
19875              as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
19876   %}
19877   ins_pipe(vdop_fp128);
19878 %}
19879 
// vcmle rules: vector mask compare "src1 <= src2".  There is no two-register
// compare-less-or-equal, so these emit cmge/fcmge with the operand order
// swapped ($src2, $src1): src1 <= src2  ==  src2 >= src1.

instruct vcmle8B(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmge  $dst,$src2,$src1\t# vector cmp (8B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmle16B(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 16 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmge  $dst,$src2,$src1\t# vector cmp (16B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmle4S(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmge  $dst,$src2,$src1\t# vector cmp (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmle8S(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmge  $dst,$src2,$src1\t# vector cmp (8S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmle2I(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmge  $dst,$src2,$src1\t# vector cmp (2I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmle4I(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmge  $dst,$src2,$src1\t# vector cmp (4I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmle2L(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmge  $dst,$src2,$src1\t# vector cmp (2L)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmle2F(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmge  $dst,$src2,$src1\t# vector cmp (2F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmge(as_FloatRegister($dst$$reg), __ T2S,
             as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

instruct vcmle4F(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmge  $dst,$src2,$src1\t# vector cmp (4F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmge(as_FloatRegister($dst$$reg), __ T4S,
             as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

instruct vcmle2D(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmge  $dst,$src2,$src1\t# vector cmp (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmge(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
20029 
// vcmgt rules: vector mask compare "src1 > src2".  These map directly to
// cmgt (integer lanes) / fcmgt (FP lanes) with the natural operand order;
// each lane of $dst becomes all-ones when the comparison holds, else zero.

instruct vcmgt8B(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmgt  $dst,$src1,$src2\t# vector cmp (8B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmgt16B(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 16 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmgt  $dst,$src1,$src2\t# vector cmp (16B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmgt4S(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmgt  $dst,$src1,$src2\t# vector cmp (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmgt8S(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmgt  $dst,$src1,$src2\t# vector cmp (8S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmgt2I(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmgt  $dst,$src1,$src2\t# vector cmp (2I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmgt4I(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmgt  $dst,$src1,$src2\t# vector cmp (4I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmgt2L(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmgt  $dst,$src1,$src2\t# vector cmp (2L)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmgt2F(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmgt  $dst,$src1,$src2\t# vector cmp (2F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmgt(as_FloatRegister($dst$$reg), __ T2S,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

instruct vcmgt4F(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmgt  $dst,$src1,$src2\t# vector cmp (4F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmgt(as_FloatRegister($dst$$reg), __ T4S,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

instruct vcmgt2D(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmgt  $dst,$src1,$src2\t# vector cmp (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmgt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
20179 
// vcmge rules: vector mask compare "src1 >= src2".  Direct mapping to
// cmge (integer lanes) / fcmge (FP lanes) with the natural operand order.

instruct vcmge8B(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmge  $dst,$src1,$src2\t# vector cmp (8B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmge16B(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 16 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmge  $dst,$src1,$src2\t# vector cmp (16B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmge4S(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmge  $dst,$src1,$src2\t# vector cmp (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmge8S(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmge  $dst,$src1,$src2\t# vector cmp (8S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmge2I(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmge  $dst,$src1,$src2\t# vector cmp (2I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmge4I(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmge  $dst,$src1,$src2\t# vector cmp (4I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmge2L(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmge  $dst,$src1,$src2\t# vector cmp (2L)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmge2F(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmge  $dst,$src1,$src2\t# vector cmp (2F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmge(as_FloatRegister($dst$$reg), __ T2S,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

instruct vcmge4F(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmge  $dst,$src1,$src2\t# vector cmp (4F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmge(as_FloatRegister($dst$$reg), __ T4S,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

instruct vcmge2D(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmge  $dst,$src1,$src2\t# vector cmp (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmge(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
20329 
20330 // --------------------------------- blend (bsl) ----------------------------
20331 
// VectorBlend via BSL: the mask arrives in $dst (note the match rule binds the
// third VectorBlend input to dst), and bsl overwrites $dst selecting bits from
// $src2 where the mask bit is set and from $src1 where it is clear.
instruct vbsl8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (VectorBlend (Binary src1 src2) dst));
  ins_cost(INSN_COST);
  format %{ "bsl  $dst,$src2,$src1\t# vector (8B)" %}
  ins_encode %{
    __ bsl(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vlogical64);
%}

// 128-bit variant of the blend above.
instruct vbsl16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (VectorBlend (Binary src1 src2) dst));
  ins_cost(INSN_COST);
  format %{ "bsl  $dst,$src2,$src1\t# vector (16B)" %}
  ins_encode %{
    __ bsl(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vlogical128);
%}
20357 
// VectorLoadMask (byte lanes): input lanes are 0/1; neg turns 1 into -1
// (all-ones), producing the canonical 0/all-ones vector mask.
instruct loadmask8B(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorLoadMask src));
  ins_cost(INSN_COST);
  format %{ "neg   $dst,$src\t# load mask (8B to 8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct loadmask16B(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length() == 16 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorLoadMask src));
  ins_cost(INSN_COST);
  format %{ "neg   $dst,$src\t# load mask (16B to 16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdop128);
%}
20379 
// VectorLoadMask (4 shorts): zero-extend the 0/1 byte mask to halfwords with
// uxtl, then negate in place so each lane becomes 0 or all-ones (-1).
instruct loadmask4S(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorLoadMask src));
  ins_cost(INSN_COST);
  // Fixed format string: the old text ("uxtl $dst,$dst" / "neg $dst,$src")
  // had the operands inverted relative to the emitted code below, which does
  // uxtl(dst, src) followed by neg(dst, dst).
  format %{ "uxtl  $dst,$src\n\t"
            "neg   $dst,$dst\t# load mask (4B to 4S)" %}
  ins_encode %{
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ negr(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop64);
%}
20392 
// VectorLoadMask (8 shorts): zero-extend the 0/1 byte mask to halfwords with
// uxtl, then negate in place so each lane becomes 0 or all-ones (-1).
instruct loadmask8S(vecX dst, vecD src) %{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorLoadMask src));
  ins_cost(INSN_COST);
  // Fixed format string: the old text ("uxtl $dst,$dst" / "neg $dst,$src")
  // had the operands inverted relative to the emitted code below, which does
  // uxtl(dst, src) followed by neg(dst, dst).
  format %{ "uxtl  $dst,$src\n\t"
            "neg   $dst,$dst\t# load mask (8B to 8S)" %}
  ins_encode %{
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ negr(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}
20405 
// VectorLoadMask for int/float and long/double lanes: widen the 0/1 byte mask
// through one or more uxtl steps, then negate so each lane is 0 or all-ones.
instruct loadmask2I(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2 &&
            (n->bottom_type()->is_vect()->element_basic_type() == T_INT ||
             n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT));
  match(Set dst (VectorLoadMask src));
  ins_cost(INSN_COST);
  format %{ "uxtl  $dst,$src\n\t# 2B to 2S"
            "uxtl  $dst,$dst\n\t# 2S to 2I"
            "neg   $dst,$dst\t# load mask (2B to 2I)" %}
  ins_encode %{
    // Only the low 2 lanes are meaningful; the wider arrangements just widen
    // more lanes than needed.
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ uxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($dst$$reg), __ T4H);
    __ negr(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct loadmask4I(vecX dst, vecD src) %{
  predicate(n->as_Vector()->length() == 4 &&
            (n->bottom_type()->is_vect()->element_basic_type() == T_INT ||
             n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT));
  match(Set dst (VectorLoadMask src));
  ins_cost(INSN_COST);
  format %{ "uxtl  $dst,$src\n\t# 4B to 4S"
            "uxtl  $dst,$dst\n\t# 4S to 4I"
            "neg   $dst,$dst\t# load mask (4B to 4I)" %}
  ins_encode %{
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ uxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($dst$$reg), __ T4H);
    __ negr(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct loadmask2L(vecX dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2 &&
            (n->bottom_type()->is_vect()->element_basic_type() == T_LONG ||
             n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE));
  match(Set dst (VectorLoadMask src));
  ins_cost(INSN_COST);
  format %{ "uxtl  $dst,$src\n\t# 2B to 2S"
            "uxtl  $dst,$dst\n\t# 2S to 2I"
            "uxtl  $dst,$dst\n\t# 2I to 2L"
            "neg   $dst,$dst\t# load mask (2B to 2L)" %}
  ins_encode %{
    // Three widening steps: byte -> halfword -> word -> doubleword.
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ uxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($dst$$reg), __ T4H);
    __ uxtl(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($dst$$reg), __ T2S);
    __ negr(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}
20458 
// VectorStoreMask rules: convert a 0/all-ones (-1) lane mask into 0/1 bytes.
// Wider lanes are first narrowed to bytes with xtn, then neg maps -1 -> 1.
// The immI_N "size" operand is the source element size in bytes and only
// selects the rule; it is not used in the encoding.
instruct storemask8B(vecD dst, vecD src, immI_1 size) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (VectorStoreMask src size));
  ins_cost(INSN_COST);
  format %{ "negr  $dst,$src\t# store mask (8B to 8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct storemask16B(vecX dst, vecX src, immI_1 size) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (VectorStoreMask src size));
  ins_cost(INSN_COST);
  format %{ "negr  $dst,$src\t# store mask (16B to 16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct storemask4S(vecD dst, vecD src, immI_2 size) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (VectorStoreMask src size));
  ins_cost(INSN_COST);
  format %{ "xtn  $dst,$src\n\t"
            "neg  $dst,$dst\t# store mask (4S to 4B)" %}
  ins_encode %{
    // Only the low 4 lanes are meaningful; operating on 8 is harmless.
    __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg), __ T8H);
    __ negr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct storemask8S(vecD dst, vecX src, immI_2 size) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (VectorStoreMask src size));
  ins_cost(INSN_COST);
  format %{ "xtn  $dst,$src\n\t"
            "neg  $dst,$dst\t# store mask (8S to 8B)" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg), __ T8H);
    __ negr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct storemask2I(vecD dst, vecD src, immI_4 size) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (VectorStoreMask src size));
  ins_cost(INSN_COST);
  format %{ "xtn  $dst,$src\n\t# 2I to 2S"
            "xtn  $dst,$dst\n\t# 2S to 2B"
            "neg  $dst,$dst\t# store mask (2I to 2B)" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($src$$reg), __ T4S);
    __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg), __ T8H);
    __ negr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct storemask4I(vecD dst, vecX src, immI_4 size) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (VectorStoreMask src size));
  ins_cost(INSN_COST);
  format %{ "xtn  $dst,$src\n\t# 4I to 4S"
            "xtn  $dst,$dst\n\t# 4S to 4B"
            "neg  $dst,$dst\t# store mask (4I to 4B)" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($src$$reg), __ T4S);
    __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg), __ T8H);
    __ negr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct storemask2L(vecD dst, vecX src, immI_8 size) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (VectorStoreMask src size));
  ins_cost(INSN_COST);
  format %{ "xtn  $dst,$src\n\t# 2L to 2I"
            "xtn  $dst,$dst\n\t# 2I to 2S"
            "xtn  $dst,$dst\n\t# 2S to 2B"
            "neg  $dst,$dst\t# store mask (2L to 2B)" %}
  ins_encode %{
    // Three narrowing steps: doubleword -> word -> halfword -> byte.
    __ xtn(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg), __ T2D);
    __ xtn(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($dst$$reg), __ T4S);
    __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg), __ T8H);
    __ negr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}
20553 
20554 //-------------------------------- LOAD_IOTA_INDICES----------------------------------
20555 
// VectorLoadConst: load the iota index constant (0, 1, 2, ...) from the
// per-platform stub table; rscratch1 holds the constant's address.
instruct loadcon8B(vecD dst, immI0 src) %{
  predicate((n->as_Vector()->length() == 2 || n->as_Vector()->length() == 4 ||
             n->as_Vector()->length() == 8) &&
             n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorLoadConst src));
  ins_cost(INSN_COST);
  format %{ "ldr $dst,CONSTANT_MEMORY\t# load iota indices" %}
  ins_encode %{
    __ lea(rscratch1, ExternalAddress(StubRoutines::aarch64::vector_iota_indices()));
    __ ldrd(as_FloatRegister($dst$$reg), rscratch1);
  %}
  ins_pipe(pipe_class_memory);
%}

instruct loadcon16B(vecX dst, immI0 src) %{
  predicate(n->as_Vector()->length() == 16 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorLoadConst src));
  ins_cost(INSN_COST);
  format %{ "ldr $dst,CONSTANT_MEMORY\t# load iota indices" %}
  ins_encode %{
    __ lea(rscratch1, ExternalAddress(StubRoutines::aarch64::vector_iota_indices()));
    __ ldrq(as_FloatRegister($dst$$reg), rscratch1);
  %}
  ins_pipe(pipe_class_memory);
%}
20581 
20582 //-------------------------------- LOAD_SHUFFLE ----------------------------------
20583 
// VectorLoadShuffle for 8 byte lanes: shuffle indices are already byte-sized,
// so no widening is needed — just copy the register (orr of src with itself).
instruct loadshuffle8B(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 8 &&
            n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorLoadShuffle src));
  ins_cost(INSN_COST);
  format %{ "mov  $dst, $src\t# get 8B shuffle" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
20596 
// VectorLoadShuffle for 16 byte lanes: indices are already byte-sized, so
// this is a plain 128-bit register copy (orr of src with itself).
instruct loadshuffle16B(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length() == 16 &&
            n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorLoadShuffle src));
  ins_cost(INSN_COST);
  format %{ "mov  $dst, $src\t# get 16B shuffle" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
20609 
// VectorLoadShuffle for 4 short lanes: zero-extend the 4 byte indices to
// 4 halfword lanes. uxtl operates on the full 8B->8H arrangement; only the
// low 4 result lanes are meaningful for this vecD node.
instruct loadshuffle4S(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 4 &&
            n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorLoadShuffle src));
  ins_cost(INSN_COST);
  format %{ "uxtl  $dst, $src\n\t# 4B to 4H" %}
  ins_encode %{
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
  %}
  ins_pipe(pipe_class_default);
%}
20621 
// VectorLoadShuffle for 8 short lanes: zero-extend the 8 byte indices in the
// D source register to 8 halfword lanes in the Q destination.
instruct loadshuffle8S(vecX dst, vecD src) %{
  predicate(n->as_Vector()->length() == 8 &&
            n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorLoadShuffle src));
  ins_cost(INSN_COST);
  format %{ "uxtl  $dst, $src\n\t# 8B to 8H" %}
  ins_encode %{
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
  %}
  ins_pipe(pipe_class_default);
%}
20633 
// VectorLoadShuffle for 4 int/float lanes: zero-extend the 4 byte indices to
// 4 word lanes in two uxtl steps (bytes -> halfwords -> words).
instruct loadshuffle4I(vecX dst, vecD src) %{
  predicate(n->as_Vector()->length() == 4 &&
           (n->bottom_type()->is_vect()->element_basic_type() == T_INT ||
            n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT));
  match(Set dst (VectorLoadShuffle src));
  ins_cost(INSN_COST);
  format %{ "uxtl  $dst, $src\t# 4B to 4H \n\t"
            "uxtl  $dst, $dst\t# 4H to 4S" %}
  ins_encode %{
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ uxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($dst$$reg), __ T4H);
  %}
  ins_pipe(pipe_class_default);
%}
20648 
20649 //-------------------------------- Rearrange -------------------------------------
20650 // Here is an example that rearranges a NEON vector with 4 ints:
20651 // Rearrange V1 int[a0, a1, a2, a3] to V2 int[a2, a3, a0, a1]
20652 //   1. Get the indices of V1 and store them as Vi byte[0, 1, 2, 3].
20653 //   2. Convert Vi byte[0, 1, 2, 3] to the indices of V2 and also store them as Vi byte[2, 3, 0, 1].
20654 //   3. Unsigned extend Long Vi from byte[2, 3, 0, 1] to int[2, 3, 0, 1].
20655 //   4. Multiply Vi int[2, 3, 0, 1] with constant int[0x04040404, 0x04040404, 0x04040404, 0x04040404]
20656 //      and get tbl base Vm int[0x08080808, 0x0c0c0c0c, 0x00000000, 0x04040404].
20657 //   5. Add Vm with constant int[0x03020100, 0x03020100, 0x03020100, 0x03020100]
20658 //      and get tbl index Vm int[0x0b0a0908, 0x0f0e0d0c, 0x03020100, 0x07060504]
20659 //   6. Use Vm as index register, and use V1 as table register.
20660 //      Then get V2 as the result by tbl NEON instructions.
20661 // Notes:
20662 //   Step 1 matches VectorLoadConst.
20663 //   Step 3 matches VectorLoadShuffle.
20664 //   Step 4, 5, 6 match VectorRearrange.
20665 //   For VectorRearrange short/int, the reason why such complex calculation is
20666 //   required is because NEON tbl supports bytes table only, so for short/int, we
20667 //   need to lookup 2/4 bytes as a group. For VectorRearrange long, we use bsl
20668 //   to implement rearrange.
20669 
// Rearrange 8 byte lanes of $src according to the byte indices in $shuffle
// with a single NEON tbl lookup. TEMP_DEF forces dst into a register distinct
// from the inputs so the table is not clobbered before the lookup.
instruct rearrange8B(vecD dst, vecD src, vecD shuffle) %{
  predicate(n->as_Vector()->length() == 8 &&
            n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorRearrange src shuffle));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  // The tbl table register is $src (see the encoding below and the sibling
  // rearrange4S/8S/4I rules); the format previously mis-printed it as $dst.
  format %{ "tbl $dst, {$src}, $shuffle\t# rearrange 8B" %}
  ins_encode %{
    __ tbl(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), 1, as_FloatRegister($shuffle$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
20683 
// Rearrange 16 byte lanes of $src according to the byte indices in $shuffle
// with a single NEON tbl lookup. TEMP_DEF forces dst into a register distinct
// from the inputs so the table is not clobbered before the lookup.
instruct rearrange16B(vecX dst, vecX src, vecX shuffle) %{
  predicate(n->as_Vector()->length() == 16 &&
            n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorRearrange src shuffle));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  // The tbl table register is $src (see the encoding below and the sibling
  // rearrange4S/8S/4I rules); the format previously mis-printed it as $dst.
  format %{ "tbl $dst, {$src}, $shuffle\t# rearrange 16B" %}
  ins_encode %{
    __ tbl(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), 1, as_FloatRegister($shuffle$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
20697 
// Rearrange 4 short lanes. NEON tbl looks up bytes only, so the short indices
// are converted to per-byte table indices: multiply each index by 2 (bytes
// per short), then add the byte offsets {0,1} within each pair. See the
// "Rearrange" comment block above for the full algorithm.
instruct rearrange4S(vecD dst, vecD src, vecD shuffle, vecD vtmp0, vecD vtmp1) %{
  predicate(n->as_Vector()->length() == 4 &&
            n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorRearrange src shuffle));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP vtmp0, TEMP vtmp1);
  format %{ "mov   $vtmp0, CONSTANT\t# constant 0x0202020202020202 \n\t"
            "mov   $vtmp1, CONSTANT\t# constant 0x0100010001000100 \n\t"
            "mulv  $dst, T4H, $shuffle, $vtmp0\n\t"
            "addv  $dst, T8B, $dst, $vtmp1\n\t"
            "tbl   $dst, {$src}, $dst\t# rearrange 4S" %}
  ins_encode %{
    // vtmp0 = 0x02 in every byte (index scale), vtmp1 = {0,1} byte offsets
    // repeated per halfword lane.
    __ mov(as_FloatRegister($vtmp0$$reg), __ T8B, 0x02);
    __ mov(as_FloatRegister($vtmp1$$reg), __ T4H, 0x0100);
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($shuffle$$reg), as_FloatRegister($vtmp0$$reg));
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($dst$$reg), as_FloatRegister($vtmp1$$reg));
    __ tbl(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), 1, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
20721 
// Rearrange 8 short lanes. As for rearrange4S: scale the short indices by 2,
// add the per-byte offsets {0,1}, then do a byte-wise tbl lookup on $src.
instruct rearrange8S(vecX dst, vecX src, vecX shuffle, vecX vtmp0, vecX vtmp1) %{
  predicate(n->as_Vector()->length() == 8 &&
            n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorRearrange src shuffle));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP vtmp0, TEMP vtmp1);
  format %{ "mov   $vtmp0, CONSTANT\t# constant 0x0202020202020202 \n\t"
            "mov   $vtmp1, CONSTANT\t# constant 0x0100010001000100 \n\t"
            "mulv  $dst, T8H, $shuffle, $vtmp0\n\t"
            "addv  $dst, T16B, $dst, $vtmp1\n\t"
            "tbl   $dst, {$src}, $dst\t# rearrange 8S" %}
  ins_encode %{
    // vtmp0 = 0x02 in every byte (index scale), vtmp1 = {0,1} byte offsets
    // repeated per halfword lane.
    __ mov(as_FloatRegister($vtmp0$$reg), __ T16B, 0x02);
    __ mov(as_FloatRegister($vtmp1$$reg), __ T8H, 0x0100);
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($shuffle$$reg), as_FloatRegister($vtmp0$$reg));
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($dst$$reg), as_FloatRegister($vtmp1$$reg));
    __ tbl(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), 1, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
20745 
// Rearrange 4 int/float lanes. As for the short variants: scale the word
// indices by 4 (bytes per int), add the per-byte offsets {0,1,2,3}, then do
// a byte-wise tbl lookup on $src.
instruct rearrange4I(vecX dst, vecX src, vecX shuffle, vecX vtmp0, vecX vtmp1) %{
  predicate(n->as_Vector()->length() == 4 &&
           (n->bottom_type()->is_vect()->element_basic_type() == T_INT ||
            n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT));
  match(Set dst (VectorRearrange src shuffle));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP vtmp0, TEMP vtmp1);
  // The mulv arrangement is T4S (see the encoding below); the format
  // previously mis-printed it as T8H.
  format %{ "mov   $vtmp0, CONSTANT\t# constant 0x0404040404040404 \n\t"
            "mov   $vtmp1, CONSTANT\t# constant 0x0302010003020100 \n\t"
            "mulv  $dst, T4S, $shuffle, $vtmp0\n\t"
            "addv  $dst, T16B, $dst, $vtmp1\n\t"
            "tbl   $dst, {$src}, $dst\t# rearrange 4I" %}
  ins_encode %{
    // vtmp0 = 0x04 in every byte (index scale), vtmp1 = {0,1,2,3} byte
    // offsets repeated per word lane.
    __ mov(as_FloatRegister($vtmp0$$reg), __ T16B, 0x04);
    __ mov(as_FloatRegister($vtmp1$$reg), __ T4S, 0x03020100);
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($shuffle$$reg), as_FloatRegister($vtmp0$$reg));
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($dst$$reg), as_FloatRegister($vtmp1$$reg));
    __ tbl(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), 1, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
20770 
// VectorTest(ne) on an 8-byte mask: result is 1 iff any lane is set.
// addv sums all byte lanes into lane 0; assuming canonical mask lanes
// (0 or -1 — TODO confirm against VectorTest users), the byte sum is zero
// only when every lane is zero, so cset(NE) yields the "any true" answer.
instruct anytrue_in_mask8B(iRegINoSp dst, vecD src1, vecD src2, vecD tmp, rFlagsReg cr) %{
  predicate(static_cast<const VectorTestNode*>(n)->get_predicate() == BoolTest::ne);
  match(Set dst (VectorTest src1 src2 ));
  ins_cost(INSN_COST);
  effect(TEMP tmp, KILL cr);
  format %{ "addv  $tmp, T8B, $src1\n\t # src1 and src2 are the same"
            "umov  $dst, $tmp, B, 0\n\t"
            "cmp   $dst, 0\n\t"
            "cset  $dst\t" %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T8B, as_FloatRegister($src1$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ cmpw($dst$$Register, zr);
    __ csetw($dst$$Register, Assembler::NE);
  %}
  ins_pipe(pipe_class_default);
%}
20788 
// VectorTest(ne) on a 16-byte mask: result is 1 iff any lane is set.
// Same scheme as anytrue_in_mask8B, on the full 128-bit arrangement.
instruct anytrue_in_mask16B(iRegINoSp dst, vecX src1, vecX src2, vecX tmp, rFlagsReg cr) %{
  predicate(static_cast<const VectorTestNode*>(n)->get_predicate() == BoolTest::ne);
  match(Set dst (VectorTest src1 src2 ));
  ins_cost(INSN_COST);
  effect(TEMP tmp, KILL cr);
  format %{ "addv  $tmp, T16B, $src1\n\t # src1 and src2 are the same"
            "umov  $dst, $tmp, B, 0\n\t"
            "cmp   $dst, 0\n\t"
            "cset  $dst\t" %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T16B, as_FloatRegister($src1$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ cmpw($dst$$Register, zr);
    __ csetw($dst$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_class_default);
%}
20806 
// VectorTest(overflow) on an 8-byte mask: result is 1 iff every lane is set.
// src2 is an all-true mask; and+not leaves zero in every lane exactly when
// src1 is all-true, so after summing the lanes, cset(EQ) gives the answer.
instruct alltrue_in_mask8B(iRegINoSp dst, vecD src1, vecD src2, vecD tmp, rFlagsReg cr) %{
  predicate(static_cast<const VectorTestNode*>(n)->get_predicate() == BoolTest::overflow);
  match(Set dst (VectorTest src1 src2 ));
  ins_cost(INSN_COST);
  effect(TEMP tmp, KILL cr);
  format %{ "andr  $tmp, T8B, $src1, $src2\n\t # src2 is maskAllTrue"
            "notr  $tmp, T8B, $tmp\n\t"
            "addv  $tmp, T8B, $tmp\n\t"
            "umov  $dst, $tmp, B, 0\n\t"
            "cmp   $dst, 0\n\t"
            "cset  $dst\t" %}
  ins_encode %{
    __ andr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($tmp$$reg), __ T8B, as_FloatRegister($tmp$$reg));
    __ addv(as_FloatRegister($tmp$$reg), __ T8B, as_FloatRegister($tmp$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ cmpw($dst$$Register, zr);
    __ csetw($dst$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_class_default);
%}
20829 
// VectorTest(overflow) on a 16-byte mask: result is 1 iff every lane is set.
// Same scheme as alltrue_in_mask8B, on the full 128-bit arrangement.
instruct alltrue_in_mask16B(iRegINoSp dst, vecX src1, vecX src2, vecX tmp, rFlagsReg cr) %{
  predicate(static_cast<const VectorTestNode*>(n)->get_predicate() == BoolTest::overflow);
  match(Set dst (VectorTest src1 src2 ));
  ins_cost(INSN_COST);
  effect(TEMP tmp, KILL cr);
  format %{ "andr  $tmp, T16B, $src1, $src2\n\t # src2 is maskAllTrue"
            "notr  $tmp, T16B, $tmp\n\t"
            "addv  $tmp, T16B, $tmp\n\t"
            "umov  $dst, $tmp, B, 0\n\t"
            "cmp   $dst, 0\n\t"
            "cset  $dst\t" %}
  ins_encode %{
    __ andr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($tmp$$reg), __ T16B, as_FloatRegister($tmp$$reg));
    __ addv(as_FloatRegister($tmp$$reg), __ T16B, as_FloatRegister($tmp$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ cmpw($dst$$Register, zr);
    __ csetw($dst$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_class_default);
%}
20852 
20853 // ------------------------------ Shift ---------------------------------------
// Broadcast a scalar shift count into every byte lane of a D register, for
// use as the vector shift-count operand of the sshl/ushl rules below.
instruct vshiftcnt8B(vecD dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
20865 
// Broadcast a scalar shift count into every byte lane of a Q register.
instruct vshiftcnt16B(vecX dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
20876 
// Vector left shift of 4/8 byte lanes by per-lane counts held in a vector
// register (sshl shifts left for positive counts).
instruct vsll8B(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
20890 
// Vector left shift of 16 byte lanes by per-lane counts in a vector register.
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
20903 
20904 // Right shifts with vector shift count on aarch64 SIMD are implemented
20905 // as left shift by negative shift count.
20906 // There are two cases for vector shift count.
20907 //
20908 // Case 1: The vector shift count is from replication.
20909 //        |            |
20910 //    LoadVector  RShiftCntV
20911 //        |       /
20912 //     RShiftVI
// Note: In the inner loop, multiple neg instructions are used; they can be
// moved to the outer loop and merged into one neg instruction.
20915 //
20916 // Case 2: The vector shift count is from loading.
20917 // This case isn't supported by middle-end now. But it's supported by
20918 // panama/vectorIntrinsics(JEP 338: Vector API).
20919 //        |            |
20920 //    LoadVector  LoadVector
20921 //        |       /
20922 //     RShiftVI
20923 //
20924 
// Vector arithmetic right shift of 4/8 byte lanes: NEON has no right shift
// by a vector count, so negate the count and use sshl (see comment above).
instruct vsra8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
20942 
// Vector arithmetic right shift of 16 byte lanes via negated count + sshl.
instruct vsra16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
20959 
// Vector logical right shift of 4/8 byte lanes via negated count + ushl.
instruct vsrl8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
20977 
// Vector logical right shift of 16 byte lanes via negated count + ushl.
instruct vsrl16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
20994 
// Vector left shift of 4/8 byte lanes by an immediate. Counts >= the element
// width (8) cannot be encoded in shl, so the result is zeroed with an eor
// (Java shift semantics for vectors make an over-shift produce zero here).
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
21014 
// Vector left shift of 16 byte lanes by an immediate; over-shifts (>= 8)
// zero the result (eor with itself) since shl cannot encode them.
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
21033 
// Vector arithmetic right shift of 4/8 byte lanes by an immediate. Counts
// >= 8 are clamped to 7: an arithmetic over-shift saturates to the sign bit,
// which sshr by (width - 1) reproduces exactly.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
21048 
// Vector arithmetic right shift of 16 byte lanes by an immediate; counts
// >= 8 clamp to 7 (over-shift saturates to the sign bit).
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
21062 
// Vector logical right shift of 4/8 byte lanes by an immediate; over-shifts
// (>= 8) zero the result (eor with itself) since ushr cannot encode them.
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
21082 
// Vector logical right shift of 16 byte lanes by an immediate; over-shifts
// (>= 8) zero the result (eor with itself).
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
21101 
// Vector left shift of 2/4 short lanes by per-lane counts in a vector register.
instruct vsll4S(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
21115 
// Vector left shift of 8 short lanes by per-lane counts in a vector register.
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
21128 
// Vector arithmetic right shift of 2/4 short lanes via negated count + sshl.
// negr operates byte-wise (T8B); that is sufficient because the shift counts
// were replicated into every lane by the RShiftCntV rules above.
instruct vsra4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
21146 
// Vector arithmetic right shift of 8 short lanes via negated count + sshl.
instruct vsra8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
21163 
// Vector logical right shift of 2/4 short lanes via negated count + ushl.
instruct vsrl4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
21181 
// Vector logical right shift of 8 short lanes via negated count + ushl.
instruct vsrl8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
21198 
// Vector left shift of 2/4 short lanes by an immediate; over-shifts (>= 16)
// zero the result (eor with itself) since shl cannot encode them.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
21218 
// Vector left shift of 8 short lanes by an immediate; over-shifts (>= 16)
// zero the result (eor with itself).
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
21237 
// Vector arithmetic right shift of 2/4 short lanes by an immediate; counts
// >= 16 clamp to 15 (over-shift saturates to the sign bit).
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
21252 
// Vector arithmetic right shift of 8 short lanes by an immediate; counts
// >= 16 clamp to 15 (over-shift saturates to the sign bit).
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
21266 
// Vector logical right shift of 2/4 short lanes by an immediate; over-shifts
// (>= 16) zero the result (eor with itself).
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
21286 
// Vector logical right shift of 8 short lanes by an immediate; over-shifts
// (>= 16) zero the result (eor with itself).
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
21305 
// Vector left shift of 2 int lanes by per-lane counts in a vector register.
instruct vsll2I(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
21318 
// Vector left shift of 4 int lanes by per-lane counts in a vector register.
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
21331 
// Vector arithmetic right shift of 2 int lanes via negated count + sshl.
instruct vsra2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
21348 
// Vector arithmetic shift-right of 4 ints by per-lane variable counts
// (128-bit, 4S lanes).  Implemented as sshl with negated counts, since
// AArch64 SIMD has no variable right-shift instruction.
instruct vsra4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
21365 
// Vector logical shift-right of 2 ints by per-lane variable counts
// (64-bit, 2S lanes).  Uses ushl with negated counts: a negative count
// in a lane makes ushl shift that lane right (zero-filling).
instruct vsrl2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
21382 
// Vector logical shift-right of 4 ints by per-lane variable counts
// (128-bit, 4S lanes).  Implemented as ushl with negated counts.
instruct vsrl4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
21399 
// Vector shift-left of 2 ints by a constant (64-bit, 2S lanes).
// No clamp is needed here: int shift counts reaching this pattern are
// already in 0..31 (presumably masked per Java shift semantics — the
// sub-int patterns above are the ones that clamp explicitly).
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
21412 
// Vector shift-left of 4 ints by a constant (128-bit, 4S lanes).
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
21425 
// Vector arithmetic shift-right of 2 ints by a constant (64-bit, 2S lanes).
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
21438 
// Vector arithmetic shift-right of 4 ints by a constant (128-bit, 4S lanes).
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
21451 
// Vector logical shift-right of 2 ints by a constant (64-bit, 2S lanes).
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
21464 
// Vector logical shift-right of 4 ints by a constant (128-bit, 4S lanes).
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
21477 
// Vector shift-left of 2 longs by per-lane variable counts (128-bit, 2D lanes).
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
21490 
// Vector arithmetic shift-right of 2 longs by per-lane variable counts
// (128-bit, 2D lanes).  As with the int variants, right shift is done
// as sshl with bytewise-negated counts.
instruct vsra2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
21507 
// Vector logical shift-right of 2 longs by per-lane variable counts
// (128-bit, 2D lanes).  Implemented as ushl with negated counts.
instruct vsrl2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
21524 
// Vector shift-left of 2 longs by a constant (128-bit, 2D lanes).
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
21537 
// Vector arithmetic shift-right of 2 longs by a constant (128-bit, 2D lanes).
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
21550 
// Vector logical shift-right of 2 longs by a constant (128-bit, 2D lanes).
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
21563 
// Population count per int lane, 4 ints (128-bit).
// cnt produces a per-byte bit count; the two uaddlp steps then pairwise
// widen and add: bytes -> halfword sums, halfwords -> word sums, leaving
// the popcount of each original 32-bit lane in the corresponding S lane.
instruct vpopcount4I(vecX dst, vecX src) %{
  predicate(UsePopCountInstruction && n->as_Vector()->length() == 4);
  match(Set dst (PopCountVI src));
  format %{
    "cnt     $dst, $src\t# vector (16B)\n\t"
    "uaddlp  $dst, $dst\t# vector (16B)\n\t"
    "uaddlp  $dst, $dst\t# vector (8H)"
  %}
  ins_encode %{
    __ cnt(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg));
    __ uaddlp(as_FloatRegister($dst$$reg), __ T16B,
              as_FloatRegister($dst$$reg));
    __ uaddlp(as_FloatRegister($dst$$reg), __ T8H,
              as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
21582 
// Population count per int lane, 2 ints (64-bit).
// Same scheme as vpopcount4I on the 64-bit register half: per-byte cnt,
// then two pairwise widening adds to accumulate per-32-bit-lane counts.
instruct vpopcount2I(vecD dst, vecD src) %{
  predicate(UsePopCountInstruction && n->as_Vector()->length() == 2);
  match(Set dst (PopCountVI src));
  format %{
    "cnt     $dst, $src\t# vector (8B)\n\t"
    "uaddlp  $dst, $dst\t# vector (8B)\n\t"
    "uaddlp  $dst, $dst\t# vector (4H)"
  %}
  ins_encode %{
    __ cnt(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg));
    __ uaddlp(as_FloatRegister($dst$$reg), __ T8B,
              as_FloatRegister($dst$$reg));
    __ uaddlp(as_FloatRegister($dst$$reg), __ T4H,
              as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
21601 
21602 //----------PEEPHOLE RULES-----------------------------------------------------
21603 // These must follow all instruction definitions as they use the names
21604 // defined in the instructions definitions.
21605 //
21606 // peepmatch ( root_instr_name [preceding_instruction]* );
21607 //
21608 // peepconstraint %{
21609 // (instruction_number.operand_name relational_op instruction_number.operand_name
21610 //  [, ...] );
21611 // // instruction numbers are zero-based using left to right order in peepmatch
21612 //
21613 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
21614 // // provide an instruction_number.operand_name for each operand that appears
21615 // // in the replacement instruction's match rule
21616 //
21617 // ---------VM FLAGS---------------------------------------------------------
21618 //
21619 // All peephole optimizations can be turned off using -XX:-OptoPeephole
21620 //
21621 // Each peephole rule is given an identifying number starting with zero and
21622 // increasing by one in the order seen by the parser.  An individual peephole
21623 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
21624 // on the command-line.
21625 //
21626 // ---------CURRENT LIMITATIONS----------------------------------------------
21627 //
21628 // Only match adjacent instructions in same basic block
21629 // Only equality constraints
21630 // Only constraints between operands, not (0.dest_reg == RAX_enc)
21631 // Only one replacement instruction
21632 //
21633 // ---------EXAMPLE----------------------------------------------------------
21634 //
21635 // // pertinent parts of existing instructions in architecture description
21636 // instruct movI(iRegINoSp dst, iRegI src)
21637 // %{
21638 //   match(Set dst (CopyI src));
21639 // %}
21640 //
21641 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
21642 // %{
21643 //   match(Set dst (AddI dst src));
21644 //   effect(KILL cr);
21645 // %}
21646 //
21647 // // Change (inc mov) to lea
21648 // peephole %{
//   // increment preceded by register-register move
21650 //   peepmatch ( incI_iReg movI );
21651 //   // require that the destination register of the increment
21652 //   // match the destination register of the move
21653 //   peepconstraint ( 0.dst == 1.dst );
21654 //   // construct a replacement instruction that sets
21655 //   // the destination to ( move's source register + one )
21656 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
21657 // %}
21658 //
21659 
21660 // Implementation no longer uses movX instructions since
21661 // machine-independent system no longer uses CopyX nodes.
21662 //
21663 // peephole
21664 // %{
21665 //   peepmatch (incI_iReg movI);
21666 //   peepconstraint (0.dst == 1.dst);
21667 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
21668 // %}
21669 
21670 // peephole
21671 // %{
21672 //   peepmatch (decI_iReg movI);
21673 //   peepconstraint (0.dst == 1.dst);
21674 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
21675 // %}
21676 
21677 // peephole
21678 // %{
21679 //   peepmatch (addI_iReg_imm movI);
21680 //   peepconstraint (0.dst == 1.dst);
21681 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
21682 // %}
21683 
21684 // peephole
21685 // %{
21686 //   peepmatch (incL_iReg movL);
21687 //   peepconstraint (0.dst == 1.dst);
21688 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
21689 // %}
21690 
21691 // peephole
21692 // %{
21693 //   peepmatch (decL_iReg movL);
21694 //   peepconstraint (0.dst == 1.dst);
21695 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
21696 // %}
21697 
21698 // peephole
21699 // %{
21700 //   peepmatch (addL_iReg_imm movL);
21701 //   peepconstraint (0.dst == 1.dst);
21702 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
21703 // %}
21704 
21705 // peephole
21706 // %{
21707 //   peepmatch (addP_iReg_imm movP);
21708 //   peepconstraint (0.dst == 1.dst);
21709 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
21710 // %}
21711 
21712 // // Change load of spilled value to only a spill
21713 // instruct storeI(memory mem, iRegI src)
21714 // %{
21715 //   match(Set mem (StoreI mem src));
21716 // %}
21717 //
21718 // instruct loadI(iRegINoSp dst, memory mem)
21719 // %{
21720 //   match(Set dst (LoadI mem));
21721 // %}
21722 //
21723 
21724 //----------SMARTSPILL RULES---------------------------------------------------
21725 // These must follow all instruction definitions as they use the names
21726 // defined in the instructions definitions.
21727 
21728 // Local Variables:
21729 // mode: c++
21730 // End: