1 //
    2 // Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
    3 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
    4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    5 //
    6 // This code is free software; you can redistribute it and/or modify it
    7 // under the terms of the GNU General Public License version 2 only, as
    8 // published by the Free Software Foundation.
    9 //
   10 // This code is distributed in the hope that it will be useful, but WITHOUT
   11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   13 // version 2 for more details (a copy is included in the LICENSE file that
   14 // accompanied this code).
   15 //
   16 // You should have received a copy of the GNU General Public License version
   17 // 2 along with this work; if not, write to the Free Software Foundation,
   18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   19 //
   20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   21 // or visit www.oracle.com if you need additional information or have any
   22 // questions.
   23 //
   24 //
   25 
   26 // AArch64 Architecture Description File
   27 
   28 //----------REGISTER DEFINITION BLOCK------------------------------------------
   29 // This information is used by the matcher and the register allocator to
   30 // describe individual registers and classes of registers within the target
// architecture.
   32 
   33 register %{
   34 //----------Architecture Description Register Definitions----------------------
   35 // General Registers
   36 // "reg_def"  name ( register save type, C convention save type,
   37 //                   ideal register type, encoding );
   38 // Register Save Types:
   39 //
   40 // NS  = No-Save:       The register allocator assumes that these registers
   41 //                      can be used without saving upon entry to the method, &
   42 //                      that they do not need to be saved at call sites.
   43 //
   44 // SOC = Save-On-Call:  The register allocator assumes that these registers
   45 //                      can be used without saving upon entry to the method,
   46 //                      but that they must be saved at call sites.
   47 //
   48 // SOE = Save-On-Entry: The register allocator assumes that these registers
   49 //                      must be saved before using them upon entry to the
   50 //                      method, but they do not need to be saved at call
   51 //                      sites.
   52 //
   53 // AS  = Always-Save:   The register allocator assumes that these registers
   54 //                      must be saved before using them upon entry to the
   55 //                      method, & that they must be saved at call sites.
   56 //
   57 // Ideal Register Type is used to determine how to save & restore a
   58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
   59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
   60 //
   61 // The encoding number is the actual bit-pattern placed into the opcodes.
   62 
   63 // We must define the 64 bit int registers in two 32 bit halves, the
// real lower register and a virtual upper half register. Upper halves
   65 // are used by the register allocator but are not actually supplied as
   66 // operands to memory ops.
   67 //
   68 // follow the C1 compiler in making registers
   69 //
   70 //   r0-r7,r10-r26 volatile (caller save)
   71 //   r27-r32 system (no save, no allocate)
   72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
   73 //
// As regards Java usage, we don't use any callee save registers
   75 // because this makes it difficult to de-optimise a frame (see comment
   76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
   77 //
   78 
   79 // General Registers
   80 
// 64-bit general registers r0-r31, each modelled as a real lower
// 32-bit register plus a virtual upper half (_H) as described above.
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
// r8 and r9 are deliberately not defined: they are kept invisible to
// the register allocator so they can be used as scratch registers
// (see note above).
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        ); // rmethod
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
// r19-r26: callee-saved in the C convention (SOE in the second
// column) but save-on-call for Java (see the note above about not
// using callee save registers for Java code).
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// r27-r31 are never allocated by the register allocator (NS).
reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
  141 
  142 // ----------------------------
  143 // Float/Double Registers
  144 // ----------------------------
  145 
  146 // Double Registers
  147 
  148 // The rules of ADL require that double registers be defined in pairs.
  149 // Each pair must be two 32-bit values, but not necessarily a pair of
  150 // single float registers. In each pair, ADLC-assigned register numbers
  151 // must be adjacent, with the lower number even. Finally, when the
  152 // CPU stores such a register pair to memory, the word associated with
  153 // the lower ADLC-assigned number must be stored to the lower address.
  154 
  155 // AArch64 has 32 floating-point registers. Each can store a vector of
  156 // single or double precision floating-point values up to 8 * 32
  157 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
  158 // use the first float or double element of the vector.
  159 
// For Java use, float registers v0-v15 are always save-on-call (even
// though the platform ABI treats v8-v15 as callee-save). Float
// registers v16-v31 are SOC as per the platform spec.
  163 
  // FP/SIMD registers v0-v31, each modelled as four 32-bit slots
  // (base, _H, _J, _K) as required by the ADL pairing rules above so
  // the allocator can track 128-bit register contents.
  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  // v8-v15 are callee-save in the platform ABI but are defined SOC
  // here for Java use — see the note above these definitions.
  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
  323 
  324 // ----------------------------
  325 // Special Registers
  326 // ----------------------------
  327 
// The AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. The FPSR status flag register is a system
// register which can be written/read using MSR/MRS, but again it does
// not appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
  333 
// Flags register. Encoding 32 lies just past the 32 general registers
// defined above; it has no backing VMReg, hence VMRegImpl::Bad().
reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
  335 
  336 
  337 // Specify priority of register selection within phases of register
  338 // allocation.  Highest priority is first.  A useful heuristic is to
  339 // give registers a low priority when they are required by machine
  340 // instructions, like EAX and EDX on I486, and choose no-save registers
  341 // before save-on-call, & save-on-call before save-on-entry.  Registers
  342 // which participate in fixed calling sequences should come last.
  343 // Registers which are used as pairs must fall on an even boundary.
  344 
// Integer register allocation priority follows the listing order
// below: volatile scratch registers first, then argument registers,
// then the (C ABI) non-volatiles, with the non-allocatable special
// registers last — per the priority note above.
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);
  385 
// Float register allocation priority: v16-v31 (no save semantics for
// either convention) first, then argument registers v0-v7, then
// v8-v15 which the platform ABI treats as callee-save.
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
  426 
// The flags register is allocated from its own chunk.
alloc_class chunk2(RFLAGS);
  428 
  429 //----------Architecture Description Register Classes--------------------------
  430 // Several register classes are automatically defined based upon information in
  431 // this architecture description.
  432 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
  433 // 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
  434 // 2) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
  435 // 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
  436 //
  437 
  438 // Class for all 32 bit integer registers -- excludes SP which will
  439 // never be used as an integer register
// Note: r8/r9 are reserved scratch registers (never defined above)
// and r31 is sp, so none of them appear in this class.
reg_class any_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30
);

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);
  483 
// Class for all long integer registers (including SP)
reg_class any_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H                  // sp — included, hence "any"
);
  517 
  518 // Class for all non-special integer registers
// Variant used when r29 (fp) is reserved as the frame pointer and
// therefore must not be allocated.
reg_class no_special_reg32_no_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);

// Variant used when r29 is available for allocation (identical to the
// class above except that R29 is included).
reg_class no_special_reg32_with_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
    R29,                        // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);

// Select the no_fp variant when PreserveFramePointer keeps r29 as fp.
reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
  586 
  587 // Class for all non-special long integer registers
// 64-bit counterpart of no_special_reg32_no_fp: r29 (fp) excluded.
reg_class no_special_reg_no_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

// 64-bit counterpart of no_special_reg32_with_fp: r29 allocatable.
reg_class no_special_reg_with_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
    R29, R29_H,                 // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

// Select the no_fp variant when PreserveFramePointer keeps r29 as fp.
reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
  655 
  656 // Class for 64 bit register r0
// Singleton and special-purpose 64-bit register classes, used to pin
// operands to specific registers.
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register (r12)
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register (r27)
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register (r28)
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register (r29)
reg_class fp_reg(
    R29, R29_H
);

// Class for link register (r30)
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register (r31)
reg_class sp_reg(
  R31, R31_H
);
  725 
  726 // Class for all pointer registers
reg_class ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);

// Class for all non-special pointer registers (excludes heapbase,
// thread, fp, lr and sp)
reg_class no_special_ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
  793 
  794 // Class for all float registers
  795 reg_class float_reg(
  796     V0,
  797     V1,
  798     V2,
  799     V3,
  800     V4,
  801     V5,
  802     V6,
  803     V7,
  804     V8,
  805     V9,
  806     V10,
  807     V11,
  808     V12,
  809     V13,
  810     V14,
  811     V15,
  812     V16,
  813     V17,
  814     V18,
  815     V19,
  816     V20,
  817     V21,
  818     V22,
  819     V23,
  820     V24,
  821     V25,
  822     V26,
  823     V27,
  824     V28,
  825     V29,
  826     V30,
  827     V31
  828 );
  829 
// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers: each entry pairs Vn with its
// virtual high half Vn_H.
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
  867 
// Class for all 64bit vector registers: two register slices
// (Vn, Vn_H) per register, the same layout as double_reg above.
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
  903 
// Class for all 128bit vector registers: four register slices
// (Vn, Vn_H, Vn_J, Vn_K) per register.
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
  939 
// Class for 128 bit register v0
// NOTE(review): the comments on v0_reg..v3_reg say "128 bit" but only
// two slices are listed per register (cf. vectorx_reg above, which
// uses four slices per 128 bit register) -- confirm whether the
// _J/_K slices are intentionally omitted here.
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Singleton class for condition codes (the synthetic RFLAGS register)
reg_class int_flags(RFLAGS);
  962 
  963 %}
  964 
  965 //----------DEFINITION BLOCK---------------------------------------------------
  966 // Define name --> value mappings to inform the ADLC of an integer valued name
  967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
  968 // Format:
  969 //        int_def  <name>         ( <int_value>, <expression>);
  970 // Generated Code in ad_<arch>.hpp
  971 //        #define  <name>   (<expression>)
  972 //        // value == <int_value>
  973 // Generated code in ad_<arch>.cpp adlc_verification()
  974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
  975 //
  976 
  977 // we follow the ppc-aix port in using a simple cost model which ranks
  978 // register operations as cheap, memory ops as more expensive and
  979 // branches as most expensive. the first two have a low as well as a
  980 // normal cost. huge cost appears to be a way of saying don't do
  981 // something
  982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches cost twice a plain register operation.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  // Calls are costed the same as branches.
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile memory references cost an order of magnitude more than a
  // register operation.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
  990 
  991 
  992 //----------SOURCE BLOCK-------------------------------------------------------
  993 // This is a block of C++ code which provides values, functions, and
  994 // definitions necessary in the rest of the architecture description
  995 
  996 source_hpp %{
  997 
  998 #include "gc/shared/cardTable.hpp"
  999 #include "gc/shared/cardTableModRefBS.hpp"
 1000 #include "opto/addnode.hpp"
 1001 
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  // This platform emits no call trampolines, so the size is zero.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  // (zero, for the same reason as above)
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
 1019 
// Sizing and emission of the exception and deoptimization handlers.
// The emit_* functions are defined elsewhere (in the source block
// implementation for this architecture).
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // the exception handler consists of a single far branch
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // NOTE(review): presumably 4 = 1 (adr) + 3 (worst-case far
    // branch) -- confirm against MacroAssembler::far_branch_size()
    return 4 * NativeInstruction::instruction_size;
  }
};
 1036 
  // graph traversal helpers used by the volatile load/store and CAS
  // matching predicates below (implemented in the source block)

  MemBarNode *parent_membar(const Node *n);
  MemBarNode *child_membar(const MemBarNode *n);
  bool leading_membar(const MemBarNode *barrier);

  bool is_card_mark_membar(const MemBarNode *barrier);
  bool is_CAS(int opcode);

  // mappings between the membar nodes of a recognised volatile
  // put/get or CAS subgraph
  MemBarNode *leading_to_normal(MemBarNode *leading);
  MemBarNode *normal_to_leading(const MemBarNode *barrier);
  MemBarNode *card_mark_to_trailing(const MemBarNode *barrier);
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing);
  MemBarNode *trailing_to_leading(const MemBarNode *trailing);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);
 1071 %}
 1072 
 1073 source %{
 1074 
 1075   // Optimizaton of volatile gets and puts
 1076   // -------------------------------------
 1077   //
 1078   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
 1079   // use to implement volatile reads and writes. For a volatile read
 1080   // we simply need
 1081   //
 1082   //   ldar<x>
 1083   //
 1084   // and for a volatile write we need
 1085   //
 1086   //   stlr<x>
 1087   //
 1088   // Alternatively, we can implement them by pairing a normal
 1089   // load/store with a memory barrier. For a volatile read we need
 1090   //
 1091   //   ldr<x>
 1092   //   dmb ishld
 1093   //
 1094   // for a volatile write
 1095   //
 1096   //   dmb ish
 1097   //   str<x>
 1098   //   dmb ish
 1099   //
 1100   // We can also use ldaxr and stlxr to implement compare and swap CAS
 1101   // sequences. These are normally translated to an instruction
 1102   // sequence like the following
 1103   //
 1104   //   dmb      ish
 1105   // retry:
 1106   //   ldxr<x>   rval raddr
 1107   //   cmp       rval rold
 1108   //   b.ne done
 1109   //   stlxr<x>  rval, rnew, rold
 1110   //   cbnz      rval retry
 1111   // done:
 1112   //   cset      r0, eq
 1113   //   dmb ishld
 1114   //
 1115   // Note that the exclusive store is already using an stlxr
 1116   // instruction. That is required to ensure visibility to other
 1117   // threads of the exclusive write (assuming it succeeds) before that
 1118   // of any subsequent writes.
 1119   //
 1120   // The following instruction sequence is an improvement on the above
 1121   //
 1122   // retry:
 1123   //   ldaxr<x>  rval raddr
 1124   //   cmp       rval rold
 1125   //   b.ne done
 1126   //   stlxr<x>  rval, rnew, rold
 1127   //   cbnz      rval retry
 1128   // done:
 1129   //   cset      r0, eq
 1130   //
 1131   // We don't need the leading dmb ish since the stlxr guarantees
 1132   // visibility of prior writes in the case that the swap is
 1133   // successful. Crucially we don't have to worry about the case where
 1134   // the swap is not successful since no valid program should be
 1135   // relying on visibility of prior changes by the attempting thread
 1136   // in the case where the CAS fails.
 1137   //
 1138   // Similarly, we don't need the trailing dmb ishld if we substitute
 1139   // an ldaxr instruction since that will provide all the guarantees we
 1140   // require regarding observation of changes made by other threads
 1141   // before any change to the CAS address observed by the load.
 1142   //
 1143   // In order to generate the desired instruction sequence we need to
 1144   // be able to identify specific 'signature' ideal graph node
 1145   // sequences which i) occur as a translation of a volatile reads or
 1146   // writes or CAS operations and ii) do not occur through any other
 1147   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
 1149   // sequences to the desired machine code sequences. Selection of the
 1150   // alternative rules can be implemented by predicates which identify
 1151   // the relevant node sequences.
 1152   //
 1153   // The ideal graph generator translates a volatile read to the node
 1154   // sequence
 1155   //
 1156   //   LoadX[mo_acquire]
 1157   //   MemBarAcquire
 1158   //
 1159   // As a special case when using the compressed oops optimization we
 1160   // may also see this variant
 1161   //
 1162   //   LoadN[mo_acquire]
 1163   //   DecodeN
 1164   //   MemBarAcquire
 1165   //
 1166   // A volatile write is translated to the node sequence
 1167   //
 1168   //   MemBarRelease
 1169   //   StoreX[mo_release] {CardMark}-optional
 1170   //   MemBarVolatile
 1171   //
 1172   // n.b. the above node patterns are generated with a strict
 1173   // 'signature' configuration of input and output dependencies (see
 1174   // the predicates below for exact details). The card mark may be as
 1175   // simple as a few extra nodes or, in a few GC configurations, may
 1176   // include more complex control flow between the leading and
 1177   // trailing memory barriers. However, whatever the card mark
 1178   // configuration these signatures are unique to translated volatile
 1179   // reads/stores -- they will not appear as a result of any other
 1180   // bytecode translation or inlining nor as a consequence of
 1181   // optimizing transforms.
 1182   //
 1183   // We also want to catch inlined unsafe volatile gets and puts and
 1184   // be able to implement them using either ldar<x>/stlr<x> or some
 1185   // combination of ldr<x>/stlr<x> and dmb instructions.
 1186   //
  // Inlined unsafe volatile puts manifest as a minor variant of the
 1188   // normal volatile put node sequence containing an extra cpuorder
 1189   // membar
 1190   //
 1191   //   MemBarRelease
 1192   //   MemBarCPUOrder
 1193   //   StoreX[mo_release] {CardMark}-optional
 1194   //   MemBarVolatile
 1195   //
 1196   // n.b. as an aside, the cpuorder membar is not itself subject to
 1197   // matching and translation by adlc rules.  However, the rule
 1198   // predicates need to detect its presence in order to correctly
 1199   // select the desired adlc rules.
 1200   //
 1201   // Inlined unsafe volatile gets manifest as a somewhat different
 1202   // node sequence to a normal volatile get
 1203   //
 1204   //   MemBarCPUOrder
 1205   //        ||       \\
 1206   //   MemBarAcquire LoadX[mo_acquire]
 1207   //        ||
 1208   //   MemBarCPUOrder
 1209   //
 1210   // In this case the acquire membar does not directly depend on the
 1211   // load. However, we can be sure that the load is generated from an
 1212   // inlined unsafe volatile get if we see it dependent on this unique
 1213   // sequence of membar nodes. Similarly, given an acquire membar we
 1214   // can know that it was added because of an inlined unsafe volatile
 1215   // get if it is fed and feeds a cpuorder membar and if its feed
 1216   // membar also feeds an acquiring load.
 1217   //
 1218   // Finally an inlined (Unsafe) CAS operation is translated to the
 1219   // following ideal graph
 1220   //
 1221   //   MemBarRelease
 1222   //   MemBarCPUOrder
 1223   //   CompareAndSwapX {CardMark}-optional
 1224   //   MemBarCPUOrder
 1225   //   MemBarAcquire
 1226   //
 1227   // So, where we can identify these volatile read and write
 1228   // signatures we can choose to plant either of the above two code
 1229   // sequences. For a volatile read we can simply plant a normal
 1230   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
 1231   // also choose to inhibit translation of the MemBarAcquire and
 1232   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
 1233   //
 1234   // When we recognise a volatile store signature we can choose to
 1235   // plant at a dmb ish as a translation for the MemBarRelease, a
 1236   // normal str<x> and then a dmb ish for the MemBarVolatile.
 1237   // Alternatively, we can inhibit translation of the MemBarRelease
 1238   // and MemBarVolatile and instead plant a simple stlr<x>
 1239   // instruction.
 1240   //
 1241   // when we recognise a CAS signature we can choose to plant a dmb
 1242   // ish as a translation for the MemBarRelease, the conventional
 1243   // macro-instruction sequence for the CompareAndSwap node (which
 1244   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
 1245   // Alternatively, we can elide generation of the dmb instructions
 1246   // and plant the alternative CompareAndSwap macro-instruction
 1247   // sequence (which uses ldaxr<x>).
 1248   //
 1249   // Of course, the above only applies when we see these signature
 1250   // configurations. We still want to plant dmb instructions in any
 1251   // other cases where we may see a MemBarAcquire, MemBarRelease or
 1252   // MemBarVolatile. For example, at the end of a constructor which
 1253   // writes final/volatile fields we will see a MemBarRelease
 1254   // instruction and this needs a 'dmb ish' lest we risk the
 1255   // constructed object being visible without making the
 1256   // final/volatile field writes visible.
 1257   //
 1258   // n.b. the translation rules below which rely on detection of the
 1259   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
 1260   // If we see anything other than the signature configurations we
 1261   // always just translate the loads and stores to ldr<x> and str<x>
 1262   // and translate acquire, release and volatile membars to the
 1263   // relevant dmb instructions.
 1264   //
 1265 
 1266   // graph traversal helpers used for volatile put/get and CAS
 1267   // optimization
 1268 
 1269   // 1) general purpose helpers
 1270 
 1271   // if node n is linked to a parent MemBarNode by an intervening
 1272   // Control and Memory ProjNode return the MemBarNode otherwise return
 1273   // NULL.
 1274   //
 1275   // n may only be a Load or a MemBar.
 1276 
 1277   MemBarNode *parent_membar(const Node *n)
 1278   {
 1279     Node *ctl = NULL;
 1280     Node *mem = NULL;
 1281     Node *membar = NULL;
 1282 
 1283     if (n->is_Load()) {
 1284       ctl = n->lookup(LoadNode::Control);
 1285       mem = n->lookup(LoadNode::Memory);
 1286     } else if (n->is_MemBar()) {
 1287       ctl = n->lookup(TypeFunc::Control);
 1288       mem = n->lookup(TypeFunc::Memory);
 1289     } else {
 1290 	return NULL;
 1291     }
 1292 
 1293     if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj()) {
 1294       return NULL;
 1295     }
 1296 
 1297     membar = ctl->lookup(0);
 1298 
 1299     if (!membar || !membar->is_MemBar()) {
 1300       return NULL;
 1301     }
 1302 
 1303     if (mem->lookup(0) != membar) {
 1304       return NULL;
 1305     }
 1306 
 1307     return membar->as_MemBar();
 1308   }
 1309 
 1310   // if n is linked to a child MemBarNode by intervening Control and
 1311   // Memory ProjNodes return the MemBarNode otherwise return NULL.
 1312 
 1313   MemBarNode *child_membar(const MemBarNode *n)
 1314   {
 1315     ProjNode *ctl = n->proj_out(TypeFunc::Control);
 1316     ProjNode *mem = n->proj_out(TypeFunc::Memory);
 1317 
 1318     // MemBar needs to have both a Ctl and Mem projection
 1319     if (! ctl || ! mem)
 1320       return NULL;
 1321 
 1322     MemBarNode *child = NULL;
 1323     Node *x;
 1324 
 1325     for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
 1326       x = ctl->fast_out(i);
 1327       // if we see a membar we keep hold of it. we may also see a new
 1328       // arena copy of the original but it will appear later
 1329       if (x->is_MemBar()) {
 1330 	  child = x->as_MemBar();
 1331 	  break;
 1332       }
 1333     }
 1334 
 1335     if (child == NULL) {
 1336       return NULL;
 1337     }
 1338 
 1339     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
 1340       x = mem->fast_out(i);
 1341       // if we see a membar we keep hold of it. we may also see a new
 1342       // arena copy of the original but it will appear later
 1343       if (x == child) {
 1344 	return child;
 1345       }
 1346     }
 1347     return NULL;
 1348   }
 1349 
 1350   // helper predicate use to filter candidates for a leading memory
 1351   // barrier
 1352   //
 1353   // returns true if barrier is a MemBarRelease or a MemBarCPUOrder
 1354   // whose Ctl and Mem feeds come from a MemBarRelease otherwise false
 1355 
 1356   bool leading_membar(const MemBarNode *barrier)
 1357   {
 1358     int opcode = barrier->Opcode();
 1359     // if this is a release membar we are ok
 1360     if (opcode == Op_MemBarRelease) {
 1361       return true;
 1362     }
 1363     // if its a cpuorder membar . . .
 1364     if (opcode != Op_MemBarCPUOrder) {
 1365       return false;
 1366     }
 1367     // then the parent has to be a release membar
 1368     MemBarNode *parent = parent_membar(barrier);
 1369     if (!parent) {
 1370       return false;
 1371     }
 1372     opcode = parent->Opcode();
 1373     return opcode == Op_MemBarRelease;
 1374   }
 1375 
 1376   // 2) card mark detection helper
 1377 
 1378   // helper predicate which can be used to detect a volatile membar
 1379   // introduced as part of a conditional card mark sequence either by
 1380   // G1 or by CMS when UseCondCardMark is true.
 1381   //
 1382   // membar can be definitively determined to be part of a card mark
 1383   // sequence if and only if all the following hold
 1384   //
 1385   // i) it is a MemBarVolatile
 1386   //
 1387   // ii) either UseG1GC or (UseConcMarkSweepGC && UseCondCardMark) is
 1388   // true
 1389   //
 1390   // iii) the node's Mem projection feeds a StoreCM node.
 1391 
 1392   bool is_card_mark_membar(const MemBarNode *barrier)
 1393   {
 1394     if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark)) {
 1395       return false;
 1396     }
 1397 
 1398     if (barrier->Opcode() != Op_MemBarVolatile) {
 1399       return false;
 1400     }
 1401 
 1402     ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
 1403 
 1404     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax ; i++) {
 1405       Node *y = mem->fast_out(i);
 1406       if (y->Opcode() == Op_StoreCM) {
 1407 	return true;
 1408       }
 1409     }
 1410 
 1411     return false;
 1412   }
 1413 
 1414 
 1415   // 3) helper predicates to traverse volatile put or CAS graphs which
 1416   // may contain GC barrier subgraphs
 1417 
 1418   // Preamble
 1419   // --------
 1420   //
 1421   // for volatile writes we can omit generating barriers and employ a
  // releasing store when we see a node sequence with a
 1423   // leading MemBarRelease and a trailing MemBarVolatile as follows
 1424   //
 1425   //   MemBarRelease
 1426   //  {      ||      } -- optional
 1427   //  {MemBarCPUOrder}
 1428   //         ||     \\
 1429   //         ||     StoreX[mo_release]
 1430   //         | \     /
 1431   //         | MergeMem
 1432   //         | /
 1433   //   MemBarVolatile
 1434   //
 1435   // where
 1436   //  || and \\ represent Ctl and Mem feeds via Proj nodes
 1437   //  | \ and / indicate further routing of the Ctl and Mem feeds
 1438   //
 1439   // this is the graph we see for non-object stores. however, for a
 1440   // volatile Object store (StoreN/P) we may see other nodes below the
 1441   // leading membar because of the need for a GC pre- or post-write
 1442   // barrier.
 1443   //
  // with most GC configurations we will see this simple variant which
 1445   // includes a post-write barrier card mark.
 1446   //
 1447   //   MemBarRelease______________________________
 1448   //         ||    \\               Ctl \        \\
 1449   //         ||    StoreN/P[mo_release] CastP2X  StoreB/CM
 1450   //         | \     /                       . . .  /
 1451   //         | MergeMem
 1452   //         | /
 1453   //         ||      /
 1454   //   MemBarVolatile
 1455   //
 1456   // i.e. the leading membar feeds Ctl to a CastP2X (which converts
 1457   // the object address to an int used to compute the card offset) and
 1458   // Ctl+Mem to a StoreB node (which does the actual card mark).
 1459   //
 1460   // n.b. a StoreCM node will only appear in this configuration when
 1461   // using CMS. StoreCM differs from a normal card mark write (StoreB)
 1462   // because it implies a requirement to order visibility of the card
 1463   // mark (StoreCM) relative to the object put (StoreP/N) using a
 1464   // StoreStore memory barrier (arguably this ought to be represented
 1465   // explicitly in the ideal graph but that is not how it works). This
 1466   // ordering is required for both non-volatile and volatile
 1467   // puts. Normally that means we need to translate a StoreCM using
 1468   // the sequence
 1469   //
 1470   //   dmb ishst
 1471   //   stlrb
 1472   //
 1473   // However, in the case of a volatile put if we can recognise this
 1474   // configuration and plant an stlr for the object write then we can
 1475   // omit the dmb and just plant an strb since visibility of the stlr
 1476   // is ordered before visibility of subsequent stores. StoreCM nodes
 1477   // also arise when using G1 or using CMS with conditional card
 1478   // marking. In these cases (as we shall see) we don't need to insert
 1479   // the dmb when translating StoreCM because there is already an
 1480   // intervening StoreLoad barrier between it and the StoreP/N.
 1481   //
 1482   // It is also possible to perform the card mark conditionally on it
 1483   // currently being unmarked in which case the volatile put graph
 1484   // will look slightly different
 1485   //
 1486   //   MemBarRelease____________________________________________
 1487   //         ||    \\               Ctl \     Ctl \     \\  Mem \
 1488   //         ||    StoreN/P[mo_release] CastP2X   If   LoadB     |
 1489   //         | \     /                              \            |
 1490   //         | MergeMem                            . . .      StoreB
 1491   //         | /                                                /
 1492   //         ||     /
 1493   //   MemBarVolatile
 1494   //
 1495   // It is worth noting at this stage that both the above
 1496   // configurations can be uniquely identified by checking that the
 1497   // memory flow includes the following subgraph:
 1498   //
 1499   //   MemBarRelease
 1500   //  {MemBarCPUOrder}
 1501   //          |  \      . . .
 1502   //          |  StoreX[mo_release]  . . .
 1503   //          |   /
 1504   //         MergeMem
 1505   //          |
 1506   //   MemBarVolatile
 1507   //
 1508   // This is referred to as a *normal* subgraph. It can easily be
 1509   // detected starting from any candidate MemBarRelease,
 1510   // StoreX[mo_release] or MemBarVolatile.
 1511   //
 1512   // A simple variation on this normal case occurs for an unsafe CAS
 1513   // operation. The basic graph for a non-object CAS is
 1514   //
 1515   //   MemBarRelease
 1516   //         ||
 1517   //   MemBarCPUOrder
 1518   //         ||     \\   . . .
 1519   //         ||     CompareAndSwapX
 1520   //         ||       |
 1521   //         ||     SCMemProj
 1522   //         | \     /
 1523   //         | MergeMem
 1524   //         | /
 1525   //   MemBarCPUOrder
 1526   //         ||
 1527   //   MemBarAcquire
 1528   //
 1529   // The same basic variations on this arrangement (mutatis mutandis)
  // occur when a card mark is introduced. i.e. we see the same basic
  // shape but the StoreP/N is replaced with CompareAndSwapP/N and the
 1532   // tail of the graph is a pair comprising a MemBarCPUOrder +
 1533   // MemBarAcquire.
 1534   //
 1535   // So, in the case of a CAS the normal graph has the variant form
 1536   //
 1537   //   MemBarRelease
 1538   //   MemBarCPUOrder
 1539   //          |   \      . . .
 1540   //          |  CompareAndSwapX  . . .
 1541   //          |    |
 1542   //          |   SCMemProj
 1543   //          |   /  . . .
 1544   //         MergeMem
 1545   //          |
 1546   //   MemBarCPUOrder
 1547   //   MemBarAcquire
 1548   //
 1549   // This graph can also easily be detected starting from any
 1550   // candidate MemBarRelease, CompareAndSwapX or MemBarAcquire.
 1551   //
 1552   // the code below uses two helper predicates, leading_to_normal and
 1553   // normal_to_leading to identify these normal graphs, one validating
 1554   // the layout starting from the top membar and searching down and
 1555   // the other validating the layout starting from the lower membar
 1556   // and searching up.
 1557   //
 1558   // There are two special case GC configurations when a normal graph
 1559   // may not be generated: when using G1 (which always employs a
 1560   // conditional card mark); and when using CMS with conditional card
 1561   // marking configured. These GCs are both concurrent rather than
  // stop-the-world GCs. So they introduce extra Ctl+Mem flow into the
  // graph between the leading and trailing membar nodes, in
  // particular enforcing stronger memory serialisation between the
 1565   // object put and the corresponding conditional card mark. CMS
 1566   // employs a post-write GC barrier while G1 employs both a pre- and
 1567   // post-write GC barrier. Of course the extra nodes may be absent --
 1568   // they are only inserted for object puts. This significantly
 1569   // complicates the task of identifying whether a MemBarRelease,
 1570   // StoreX[mo_release] or MemBarVolatile forms part of a volatile put
 1571   // when using these GC configurations (see below). It adds similar
 1572   // complexity to the task of identifying whether a MemBarRelease,
 1573   // CompareAndSwapX or MemBarAcquire forms part of a CAS.
 1574   //
 1575   // In both cases the post-write subtree includes an auxiliary
 1576   // MemBarVolatile (StoreLoad barrier) separating the object put and
 1577   // the read of the corresponding card. This poses two additional
 1578   // problems.
 1579   //
 1580   // Firstly, a card mark MemBarVolatile needs to be distinguished
 1581   // from a normal trailing MemBarVolatile. Resolving this first
 1582   // problem is straightforward: a card mark MemBarVolatile always
 1583   // projects a Mem feed to a StoreCM node and that is a unique marker
 1584   //
 1585   //      MemBarVolatile (card mark)
 1586   //       C |    \     . . .
 1587   //         |   StoreCM   . . .
 1588   //       . . .
 1589   //
 1590   // The second problem is how the code generator is to translate the
 1591   // card mark barrier? It always needs to be translated to a "dmb
 1592   // ish" instruction whether or not it occurs as part of a volatile
 1593   // put. A StoreLoad barrier is needed after the object put to ensure
 1594   // i) visibility to GC threads of the object put and ii) visibility
 1595   // to the mutator thread of any card clearing write by a GC
 1596   // thread. Clearly a normal store (str) will not guarantee this
 1597   // ordering but neither will a releasing store (stlr). The latter
 1598   // guarantees that the object put is visible but does not guarantee
 1599   // that writes by other threads have also been observed.
 1600   //
 1601   // So, returning to the task of translating the object put and the
 1602   // leading/trailing membar nodes: what do the non-normal node graph
 1603   // look like for these 2 special cases? and how can we determine the
 1604   // status of a MemBarRelease, StoreX[mo_release] or MemBarVolatile
 1605   // in both normal and non-normal cases?
 1606   //
 1607   // A CMS GC post-barrier wraps its card write (StoreCM) inside an If
  // which selects conditional execution based on the value loaded
 1609   // (LoadB) from the card. Ctl and Mem are fed to the If via an
 1610   // intervening StoreLoad barrier (MemBarVolatile).
 1611   //
 1612   // So, with CMS we may see a node graph for a volatile object store
 1613   // which looks like this
 1614   //
 1615   //   MemBarRelease
 1616   //   MemBarCPUOrder_(leading)__________________
 1617   //     C |    M \       \\                   C \
 1618   //       |       \    StoreN/P[mo_release]  CastP2X
 1619   //       |    Bot \    /
 1620   //       |       MergeMem
 1621   //       |         /
 1622   //      MemBarVolatile (card mark)
 1623   //     C |  ||    M |
 1624   //       | LoadB    |
 1625   //       |   |      |
 1626   //       | Cmp      |\
 1627   //       | /        | \
 1628   //       If         |  \
 1629   //       | \        |   \
 1630   // IfFalse  IfTrue  |    \
 1631   //       \     / \  |     \
 1632   //        \   / StoreCM    |
 1633   //         \ /      |      |
 1634   //        Region   . . .   |
 1635   //          | \           /
 1636   //          |  . . .  \  / Bot
 1637   //          |       MergeMem
 1638   //          |          |
 1639   //        MemBarVolatile (trailing)
 1640   //
 1641   // The first MergeMem merges the AliasIdxBot Mem slice from the
 1642   // leading membar and the oopptr Mem slice from the Store into the
 1643   // card mark membar. The trailing MergeMem merges the AliasIdxBot
 1644   // Mem slice from the card mark membar and the AliasIdxRaw slice
 1645   // from the StoreCM into the trailing membar (n.b. the latter
 1646   // proceeds via a Phi associated with the If region).
 1647   //
 1648   // The graph for a CAS varies slightly, the obvious difference being
 1649   // that the StoreN/P node is replaced by a CompareAndSwapP/N node
 1650   // and the trailing MemBarVolatile by a MemBarCPUOrder +
 1651   // MemBarAcquire pair. The other important difference is that the
 1652   // CompareAndSwap node's SCMemProj is not merged into the card mark
 1653   // membar - it still feeds the trailing MergeMem. This also means
 1654   // that the card mark membar receives its Mem feed directly from the
 1655   // leading membar rather than via a MergeMem.
 1656   //
 1657   //   MemBarRelease
 1658   //   MemBarCPUOrder__(leading)_________________________
 1659   //       ||                       \\                 C \
 1660   //   MemBarVolatile (card mark)  CompareAndSwapN/P  CastP2X
 1661   //     C |  ||    M |              |
 1662   //       | LoadB    |       ______/|
 1663   //       |   |      |      /       |
 1664   //       | Cmp      |     /      SCMemProj
 1665   //       | /        |    /         |
 1666   //       If         |   /         /
 1667   //       | \        |  /         /
 1668   // IfFalse  IfTrue  | /         /
 1669   //       \     / \  |/ prec    /
 1670   //        \   / StoreCM       /
 1671   //         \ /      |        /
 1672   //        Region   . . .    /
 1673   //          | \            /
 1674   //          |  . . .  \   / Bot
 1675   //          |       MergeMem
 1676   //          |          |
 1677   //        MemBarCPUOrder
 1678   //        MemBarAcquire (trailing)
 1679   //
 1680   // This has a slightly different memory subgraph to the one seen
 1681   // previously but the core of it is the same as for the CAS normal
  // subgraph
 1683   //
 1684   //   MemBarRelease
 1685   //   MemBarCPUOrder____
 1686   //      ||             \      . . .
 1687   //   MemBarVolatile  CompareAndSwapX  . . .
 1688   //      |  \            |
 1689   //        . . .   SCMemProj
 1690   //          |     /  . . .
 1691   //         MergeMem
 1692   //          |
 1693   //   MemBarCPUOrder
 1694   //   MemBarAcquire
 1695   //
 1696   //
 1697   // G1 is quite a lot more complicated. The nodes inserted on behalf
 1698   // of G1 may comprise: a pre-write graph which adds the old value to
 1699   // the SATB queue; the releasing store itself; and, finally, a
 1700   // post-write graph which performs a card mark.
 1701   //
 1702   // The pre-write graph may be omitted, but only when the put is
 1703   // writing to a newly allocated (young gen) object and then only if
 1704   // there is a direct memory chain to the Initialize node for the
 1705   // object allocation. This will not happen for a volatile put since
 1706   // any memory chain passes through the leading membar.
 1707   //
 1708   // The pre-write graph includes a series of 3 If tests. The outermost
 1709   // If tests whether SATB is enabled (no else case). The next If tests
 1710   // whether the old value is non-NULL (no else case). The third tests
 1711   // whether the SATB queue index is > 0, if so updating the queue. The
 1712   // else case for this third If calls out to the runtime to allocate a
 1713   // new queue buffer.
 1714   //
 1715   // So with G1 the pre-write and releasing store subgraph looks like
 1716   // this (the nested Ifs are omitted).
 1717   //
 1718   //  MemBarRelease (leading)____________
 1719   //     C |  ||  M \   M \    M \  M \ . . .
 1720   //       | LoadB   \  LoadL  LoadN   \
 1721   //       | /        \                 \
 1722   //       If         |\                 \
 1723   //       | \        | \                 \
 1724   //  IfFalse  IfTrue |  \                 \
 1725   //       |     |    |   \                 |
 1726   //       |     If   |   /\                |
 1727   //       |     |          \               |
 1728   //       |                 \              |
 1729   //       |    . . .         \             |
 1730   //       | /       | /       |            |
 1731   //      Region  Phi[M]       |            |
 1732   //       | \       |         |            |
 1733   //       |  \_____ | ___     |            |
 1734   //     C | C \     |   C \ M |            |
 1735   //       | CastP2X | StoreN/P[mo_release] |
 1736   //       |         |         |            |
 1737   //     C |       M |       M |          M |
 1738   //        \        |         |           /
 1739   //                  . . .
 1740   //          (post write subtree elided)
 1741   //                    . . .
 1742   //             C \         M /
 1743   //         MemBarVolatile (trailing)
 1744   //
 1745   // n.b. the LoadB in this subgraph is not the card read -- it's a
 1746   // read of the SATB queue active flag.
 1747   //
 1748   // Once again the CAS graph is a minor variant on the above with the
  // expected substitutions of CompareAndSwapX for StoreN/P and
 1750   // MemBarCPUOrder + MemBarAcquire for trailing MemBarVolatile.
 1751   //
 1752   // The G1 post-write subtree is also optional, this time when the
 1753   // new value being written is either null or can be identified as a
 1754   // newly allocated (young gen) object with no intervening control
 1755   // flow. The latter cannot happen but the former may, in which case
  // the card mark membar is omitted and the memory feeds from the
  // leading membar and the StoreN/P are merged direct into the
 1758   // trailing membar as per the normal subgraph. So, the only special
 1759   // case which arises is when the post-write subgraph is generated.
 1760   //
 1761   // The kernel of the post-write G1 subgraph is the card mark itself
 1762   // which includes a card mark memory barrier (MemBarVolatile), a
 1763   // card test (LoadB), and a conditional update (If feeding a
 1764   // StoreCM). These nodes are surrounded by a series of nested Ifs
 1765   // which try to avoid doing the card mark. The top level If skips if
 1766   // the object reference does not cross regions (i.e. it tests if
 1767   // (adr ^ val) >> log2(regsize) != 0) -- intra-region references
 1768   // need not be recorded. The next If, which skips on a NULL value,
 1769   // may be absent (it is not generated if the type of value is >=
 1770   // OopPtr::NotNull). The 3rd If skips writes to young regions (by
 1771   // checking if card_val != young).  n.b. although this test requires
 1772   // a pre-read of the card it can safely be done before the StoreLoad
 1773   // barrier. However that does not bypass the need to reread the card
 1774   // after the barrier.
 1775   //
 1776   //                (pre-write subtree elided)
 1777   //        . . .                  . . .    . . .  . . .
 1778   //        C |                    M |     M |    M |
 1779   //       Region                  Phi[M] StoreN    |
 1780   //          |                     / \      |      |
 1781   //         / \_______            /   \     |      |
 1782   //      C / C \      . . .            \    |      |
 1783   //       If   CastP2X . . .            |   |      |
 1784   //       / \                           |   |      |
 1785   //      /   \                          |   |      |
 1786   // IfFalse IfTrue                      |   |      |
 1787   //   |       |                         |   |     /|
 1788   //   |       If                        |   |    / |
 1789   //   |      / \                        |   |   /  |
 1790   //   |     /   \                        \  |  /   |
 1791   //   | IfFalse IfTrue                   MergeMem  |
 1792   //   |  . . .    / \                       /      |
 1793   //   |          /   \                     /       |
 1794   //   |     IfFalse IfTrue                /        |
 1795   //   |      . . .    |                  /         |
 1796   //   |               If                /          |
 1797   //   |               / \              /           |
 1798   //   |              /   \            /            |
 1799   //   |         IfFalse IfTrue       /             |
 1800   //   |           . . .   |         /              |
 1801   //   |                    \       /               |
 1802   //   |                     \     /                |
 1803   //   |             MemBarVolatile__(card mark)    |
 1804   //   |                ||   C |  M \  M \          |
 1805   //   |               LoadB   If    |    |         |
 1806   //   |                      / \    |    |         |
 1807   //   |                     . . .   |    |         |
 1808   //   |                          \  |    |        /
 1809   //   |                        StoreCM   |       /
 1810   //   |                          . . .   |      /
 1811   //   |                        _________/      /
 1812   //   |                       /  _____________/
 1813   //   |   . . .       . . .  |  /            /
 1814   //   |    |                 | /   _________/
 1815   //   |    |               Phi[M] /        /
 1816   //   |    |                 |   /        /
 1817   //   |    |                 |  /        /
 1818   //   |  Region  . . .     Phi[M]  _____/
 1819   //   |    /                 |    /
 1820   //   |                      |   /
 1821   //   | . . .   . . .        |  /
 1822   //   | /                    | /
 1823   // Region           |  |  Phi[M]
 1824   //   |              |  |  / Bot
 1825   //    \            MergeMem
 1826   //     \            /
 1827   //     MemBarVolatile
 1828   //
 1829   // As with CMS the initial MergeMem merges the AliasIdxBot Mem slice
 1830   // from the leading membar and the oopptr Mem slice from the Store
 1831   // into the card mark membar i.e. the memory flow to the card mark
 1832   // membar still looks like a normal graph.
 1833   //
 1834   // The trailing MergeMem merges an AliasIdxBot Mem slice with other
 1835   // Mem slices (from the StoreCM and other card mark queue stores).
 1836   // However in this case the AliasIdxBot Mem slice does not come
 1837   // direct from the card mark membar. It is merged through a series
 1838   // of Phi nodes. These are needed to merge the AliasIdxBot Mem flow
 1839   // from the leading membar with the Mem feed from the card mark
 1840   // membar. Each Phi corresponds to one of the Ifs which may skip
 1841   // around the card mark membar. So when the If implementing the NULL
 1842   // value check has been elided the total number of Phis is 2
 1843   // otherwise it is 3.
 1844   //
 1845   // The CAS graph when using G1GC also includes a pre-write subgraph
  // and an optional post-write subgraph. The same variations are
  // introduced as for CMS with conditional card marking i.e. the
  // StoreP/N is swapped for a CompareAndSwapP/N, the trailing
 1849   // MemBarVolatile for a MemBarCPUOrder + MemBarAcquire pair and the
 1850   // Mem feed from the CompareAndSwapP/N includes a precedence
 1851   // dependency feed to the StoreCM and a feed via an SCMemProj to the
 1852   // trailing membar. So, as before the configuration includes the
 1853   // normal CAS graph as a subgraph of the memory flow.
 1854   //
 1855   // So, the upshot is that in all cases the volatile put graph will
  // include a *normal* memory subgraph between the leading membar and
 1857   // its child membar, either a volatile put graph (including a
 1858   // releasing StoreX) or a CAS graph (including a CompareAndSwapX).
 1859   // When that child is not a card mark membar then it marks the end
 1860   // of the volatile put or CAS subgraph. If the child is a card mark
 1861   // membar then the normal subgraph will form part of a volatile put
 1862   // subgraph if and only if the child feeds an AliasIdxBot Mem feed
 1863   // to a trailing barrier via a MergeMem. That feed is either direct
 1864   // (for CMS) or via 2 or 3 Phi nodes merging the leading barrier
 1865   // memory flow (for G1).
 1866   //
 1867   // The predicates controlling generation of instructions for store
 1868   // and barrier nodes employ a few simple helper functions (described
 1869   // below) which identify the presence or absence of all these
 1870   // subgraph configurations and provide a means of traversing from
 1871   // one node in the subgraph to another.
 1872 
 1873   // is_CAS(int opcode)
 1874   //
 1875   // return true if opcode is one of the possible CompareAndSwapX
 1876   // values otherwise false.
 1877 
 1878   bool is_CAS(int opcode)
 1879   {
 1880     switch(opcode) {
 1881       // We handle these
 1882     case Op_CompareAndSwapI:
 1883     case Op_CompareAndSwapL:
 1884     case Op_CompareAndSwapP:
 1885     case Op_CompareAndSwapN:
 1886  // case Op_CompareAndSwapB:
 1887  // case Op_CompareAndSwapS:
 1888       return true;
 1889       // These are TBD
 1890     case Op_WeakCompareAndSwapB:
 1891     case Op_WeakCompareAndSwapS:
 1892     case Op_WeakCompareAndSwapI:
 1893     case Op_WeakCompareAndSwapL:
 1894     case Op_WeakCompareAndSwapP:
 1895     case Op_WeakCompareAndSwapN:
 1896     case Op_CompareAndExchangeB:
 1897     case Op_CompareAndExchangeS:
 1898     case Op_CompareAndExchangeI:
 1899     case Op_CompareAndExchangeL:
 1900     case Op_CompareAndExchangeP:
 1901     case Op_CompareAndExchangeN:
 1902       return false;
 1903     default:
 1904       return false;
 1905     }
 1906   }
 1907 
 1908 
 1909   // leading_to_normal
 1910   //
  // graph traversal helper which detects the normal case Mem feed from
 1912   // a release membar (or, optionally, its cpuorder child) to a
 1913   // dependent volatile membar i.e. it ensures that one or other of
 1914   // the following Mem flow subgraph is present.
 1915   //
 1916   //   MemBarRelease
 1917   //   MemBarCPUOrder {leading}
 1918   //          |  \      . . .
 1919   //          |  StoreN/P[mo_release]  . . .
 1920   //          |   /
 1921   //         MergeMem
 1922   //          |
 1923   //   MemBarVolatile {trailing or card mark}
 1924   //
 1925   //   MemBarRelease
 1926   //   MemBarCPUOrder {leading}
 1927   //      |       \      . . .
 1928   //      |     CompareAndSwapX  . . .
 1929   //               |
 1930   //     . . .    SCMemProj
 1931   //           \   |
 1932   //      |    MergeMem
 1933   //      |       /
 1934   //    MemBarCPUOrder
 1935   //    MemBarAcquire {trailing}
 1936   //
 1937   // if the correct configuration is present returns the trailing
 1938   // membar otherwise NULL.
 1939   //
 1940   // the input membar is expected to be either a cpuorder membar or a
 1941   // release membar. in the latter case it should not have a cpu membar
 1942   // child.
 1943   //
 1944   // the returned value may be a card mark or trailing membar
 1945   //
 1946 
 1947   MemBarNode *leading_to_normal(MemBarNode *leading)
 1948   {
 1949     assert((leading->Opcode() == Op_MemBarRelease ||
 1950 	    leading->Opcode() == Op_MemBarCPUOrder),
 1951 	   "expecting a volatile or cpuroder membar!");
 1952 
 1953     // check the mem flow
 1954     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
 1955 
 1956     if (!mem) {
 1957       return NULL;
 1958     }
 1959 
 1960     Node *x = NULL;
 1961     StoreNode * st = NULL;
 1962     LoadStoreNode *cas = NULL;
 1963     MergeMemNode *mm = NULL;
 1964 
 1965     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
 1966       x = mem->fast_out(i);
 1967       if (x->is_MergeMem()) {
 1968 	if (mm != NULL) {
 1969 	  return NULL;
 1970 	}
 1971 	// two merge mems is one too many
 1972 	mm = x->as_MergeMem();
 1973       } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
 1974 	// two releasing stores/CAS nodes is one too many
 1975 	if (st != NULL || cas != NULL) {
 1976 	  return NULL;
 1977 	}
 1978 	st = x->as_Store();
 1979       } else if (is_CAS(x->Opcode())) {
 1980 	if (st != NULL || cas != NULL) {
 1981 	  return NULL;
 1982 	}
 1983 	cas = x->as_LoadStore();
 1984       }
 1985     }
 1986 
 1987     // must have a store or a cas
 1988     if (!st && !cas) {
 1989       return NULL;
 1990     }
 1991 
 1992     // must have a merge if we also have st
 1993     if (st && !mm) {
 1994       return NULL;
 1995     }
 1996 
 1997     Node *y = NULL;
 1998     if (cas) {
 1999       // look for an SCMemProj
 2000       for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
 2001 	x = cas->fast_out(i);
 2002 	if (x->is_Proj()) {
 2003 	  y = x;
 2004 	  break;
 2005 	}
 2006       }
 2007       if (y == NULL) {
 2008 	return NULL;
 2009       }
 2010       // the proj must feed a MergeMem
 2011       for (DUIterator_Fast imax, i = y->fast_outs(imax); i < imax; i++) {
 2012 	x = y->fast_out(i);
 2013 	if (x->is_MergeMem()) {
 2014 	  mm = x->as_MergeMem();
 2015 	  break;
 2016 	}
 2017       }
 2018       if (mm == NULL)
 2019 	return NULL;
 2020     } else {
 2021       // ensure the store feeds the existing mergemem;
 2022       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
 2023 	if (st->fast_out(i) == mm) {
 2024 	  y = st;
 2025 	  break;
 2026 	}
 2027       }
 2028       if (y == NULL) {
 2029 	return NULL;
 2030       }
 2031     }
 2032 
 2033     MemBarNode *mbar = NULL;
 2034     // ensure the merge feeds to the expected type of membar
 2035     for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
 2036       x = mm->fast_out(i);
 2037       if (x->is_MemBar()) {
 2038 	int opcode = x->Opcode();
 2039 	if (opcode == Op_MemBarVolatile && st) {
 2040 	  mbar = x->as_MemBar();
 2041 	} else if (cas && opcode == Op_MemBarCPUOrder) {
 2042 	  MemBarNode *y =  x->as_MemBar();
 2043 	  y = child_membar(y);
 2044 	  if (y != NULL && y->Opcode() == Op_MemBarAcquire) {
 2045 	    mbar = y;
 2046 	  }
 2047 	}
 2048 	break;
 2049       }
 2050     }
 2051 
 2052     return mbar;
 2053   }
 2054 
 2055   // normal_to_leading
 2056   //
 2057   // graph traversal helper which detects the normal case Mem feed
 2058   // from either a card mark or a trailing membar to a preceding
 2059   // release membar (optionally its cpuorder child) i.e. it ensures
 2060   // that one or other of the following Mem flow subgraphs is present.
 2061   //
 2062   //   MemBarRelease
 2063   //   MemBarCPUOrder {leading}
 2064   //          |  \      . . .
 2065   //          |  StoreN/P[mo_release]  . . .
 2066   //          |   /
 2067   //         MergeMem
 2068   //          |
 2069   //   MemBarVolatile {card mark or trailing}
 2070   //
 2071   //   MemBarRelease
 2072   //   MemBarCPUOrder {leading}
 2073   //      |       \      . . .
 2074   //      |     CompareAndSwapX  . . .
 2075   //               |
 2076   //     . . .    SCMemProj
 2077   //           \   |
 2078   //      |    MergeMem
 2079   //      |        /
 2080   //    MemBarCPUOrder
 2081   //    MemBarAcquire {trailing}
 2082   //
 2083   // this predicate checks for the same flow as the previous predicate
 2084   // but starting from the bottom rather than the top.
 2085   //
  // if the configuration is present returns the cpuorder membar for
  // preference or when absent the release membar otherwise NULL.
 2088   //
 2089   // n.b. the input membar is expected to be a MemBarVolatile but
 2090   // need not be a card mark membar.
 2091 
 2092   MemBarNode *normal_to_leading(const MemBarNode *barrier)
 2093   {
 2094     // input must be a volatile membar
 2095     assert((barrier->Opcode() == Op_MemBarVolatile ||
 2096 	    barrier->Opcode() == Op_MemBarAcquire),
 2097 	   "expecting a volatile or an acquire membar");
 2098     Node *x;
 2099     bool is_cas = barrier->Opcode() == Op_MemBarAcquire;
 2100 
 2101     // if we have an acquire membar then it must be fed via a CPUOrder
 2102     // membar
 2103 
 2104     if (is_cas) {
 2105       // skip to parent barrier which must be a cpuorder
 2106       x = parent_membar(barrier);
 2107       if (x->Opcode() != Op_MemBarCPUOrder)
 2108 	return NULL;
 2109     } else {
 2110       // start from the supplied barrier
 2111       x = (Node *)barrier;
 2112     }
 2113 
 2114     // the Mem feed to the membar should be a merge
 2115     x = x ->in(TypeFunc::Memory);
 2116     if (!x->is_MergeMem())
 2117       return NULL;
 2118 
 2119     MergeMemNode *mm = x->as_MergeMem();
 2120 
 2121     if (is_cas) {
 2122       // the merge should be fed from the CAS via an SCMemProj node
 2123       x = NULL;
 2124       for (uint idx = 1; idx < mm->req(); idx++) {
 2125 	if (mm->in(idx)->Opcode() == Op_SCMemProj) {
 2126 	  x = mm->in(idx);
 2127 	  break;
 2128 	}
 2129       }
 2130       if (x == NULL) {
 2131 	return NULL;
 2132       }
 2133       // check for a CAS feeding this proj
 2134       x = x->in(0);
 2135       int opcode = x->Opcode();
 2136       if (!is_CAS(opcode)) {
 2137 	return NULL;
 2138       }
 2139       // the CAS should get its mem feed from the leading membar
 2140       x = x->in(MemNode::Memory);
 2141     } else {
 2142       // the merge should get its Bottom mem feed from the leading membar
 2143       x = mm->in(Compile::AliasIdxBot);
 2144     }
 2145 
 2146     // ensure this is a non control projection
 2147     if (!x->is_Proj() || x->is_CFG()) {
 2148       return NULL;
 2149     }
 2150     // if it is fed by a membar that's the one we want
 2151     x = x->in(0);
 2152 
 2153     if (!x->is_MemBar()) {
 2154       return NULL;
 2155     }
 2156 
 2157     MemBarNode *leading = x->as_MemBar();
 2158     // reject invalid candidates
 2159     if (!leading_membar(leading)) {
 2160       return NULL;
 2161     }
 2162 
 2163     // ok, we have a leading membar, now for the sanity clauses
 2164 
 2165     // the leading membar must feed Mem to a releasing store or CAS
 2166     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
 2167     StoreNode *st = NULL;
 2168     LoadStoreNode *cas = NULL;
 2169     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
 2170       x = mem->fast_out(i);
 2171       if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
 2172 	// two stores or CASes is one too many
 2173 	if (st != NULL || cas != NULL) {
 2174 	  return NULL;
 2175 	}
 2176 	st = x->as_Store();
 2177       } else if (is_CAS(x->Opcode())) {
 2178 	if (st != NULL || cas != NULL) {
 2179 	  return NULL;
 2180 	}
 2181 	cas = x->as_LoadStore();
 2182       }
 2183     }
 2184 
 2185     // we should not have both a store and a cas
 2186     if (st == NULL & cas == NULL) {
 2187       return NULL;
 2188     }
 2189 
 2190     if (st == NULL) {
 2191       // nothing more to check
 2192       return leading;
 2193     } else {
 2194       // we should not have a store if we started from an acquire
 2195       if (is_cas) {
 2196 	return NULL;
 2197       }
 2198 
 2199       // the store should feed the merge we used to get here
 2200       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
 2201 	if (st->fast_out(i) == mm) {
 2202 	  return leading;
 2203 	}
 2204       }
 2205     }
 2206 
 2207     return NULL;
 2208   }
 2209 
 2210   // card_mark_to_trailing
 2211   //
 2212   // graph traversal helper which detects extra, non-normal Mem feed
 2213   // from a card mark volatile membar to a trailing membar i.e. it
 2214   // ensures that one of the following three GC post-write Mem flow
 2215   // subgraphs is present.
 2216   //
 2217   // 1)
 2218   //     . . .
 2219   //       |
 2220   //   MemBarVolatile (card mark)
 2221   //      |          |
 2222   //      |        StoreCM
 2223   //      |          |
 2224   //      |        . . .
 2225   //  Bot |  /
 2226   //   MergeMem
 2227   //      |
 2228   //      |
 2229   //    MemBarVolatile {trailing}
 2230   //
 2231   // 2)
 2232   //   MemBarRelease/CPUOrder (leading)
 2233   //    |
 2234   //    |
 2235   //    |\       . . .
 2236   //    | \        |
 2237   //    |  \  MemBarVolatile (card mark)
 2238   //    |   \   |     |
 2239   //     \   \  |   StoreCM    . . .
 2240   //      \   \ |
 2241   //       \  Phi
 2242   //        \ /
 2243   //        Phi  . . .
 2244   //     Bot |   /
 2245   //       MergeMem
 2246   //         |
 2247   //    MemBarVolatile {trailing}
 2248   //
 2249   //
 2250   // 3)
 2251   //   MemBarRelease/CPUOrder (leading)
 2252   //    |
 2253   //    |\
 2254   //    | \
 2255   //    |  \      . . .
 2256   //    |   \       |
 2257   //    |\   \  MemBarVolatile (card mark)
 2258   //    | \   \   |     |
 2259   //    |  \   \  |   StoreCM    . . .
 2260   //    |   \   \ |
 2261   //     \   \  Phi
 2262   //      \   \ /
 2263   //       \  Phi
 2264   //        \ /
 2265   //        Phi  . . .
 2266   //     Bot |   /
 2267   //       MergeMem
 2268   //         |
 2269   //         |
 2270   //    MemBarVolatile {trailing}
 2271   //
 2272   // configuration 1 is only valid if UseConcMarkSweepGC &&
 2273   // UseCondCardMark
 2274   //
 2275   // configurations 2 and 3 are only valid if UseG1GC.
 2276   //
 2277   // if a valid configuration is present returns the trailing membar
 2278   // otherwise NULL.
 2279   //
 2280   // n.b. the supplied membar is expected to be a card mark
 2281   // MemBarVolatile i.e. the caller must ensure the input node has the
 2282   // correct operand and feeds Mem to a StoreCM node
 2283 
 2284   MemBarNode *card_mark_to_trailing(const MemBarNode *barrier)
 2285   {
 2286     // input must be a card mark volatile membar
 2287     assert(is_card_mark_membar(barrier), "expecting a card mark membar");
 2288 
 2289     Node *feed = barrier->proj_out(TypeFunc::Memory);
 2290     Node *x;
 2291     MergeMemNode *mm = NULL;
 2292 
 2293     const int MAX_PHIS = 3;	// max phis we will search through
 2294     int phicount = 0; 		// current search count
 2295 
 2296     bool retry_feed = true;
 2297     while (retry_feed) {
 2298       // see if we have a direct MergeMem feed
 2299       for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
 2300 	x = feed->fast_out(i);
 2301 	// the correct Phi will be merging a Bot memory slice
 2302 	if (x->is_MergeMem()) {
 2303 	  mm = x->as_MergeMem();
 2304 	  break;
 2305 	}
 2306       }
 2307       if (mm) {
 2308 	retry_feed = false;
 2309       } else if (UseG1GC & phicount++ < MAX_PHIS) {
 2310 	// the barrier may feed indirectly via one or two Phi nodes
 2311 	PhiNode *phi = NULL;
 2312 	for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
 2313 	  x = feed->fast_out(i);
 2314 	  // the correct Phi will be merging a Bot memory slice
 2315 	  if (x->is_Phi() && x->adr_type() == TypePtr::BOTTOM) {
 2316 	    phi = x->as_Phi();
 2317 	    break;
 2318 	  }
 2319 	}
 2320 	if (!phi) {
 2321 	  return NULL;
 2322 	}
 2323 	// look for another merge below this phi
 2324 	feed = phi;
 2325       } else {
 2326 	// couldn't find a merge
 2327 	return NULL;
 2328       }
 2329     }
 2330 
 2331     // sanity check this feed turns up as the expected slice
 2332     assert(mm->as_MergeMem()->in(Compile::AliasIdxBot) == feed, "expecting membar to feed AliasIdxBot slice to Merge");
 2333 
 2334     MemBarNode *trailing = NULL;
 2335     // be sure we have a trailing membar the merge
 2336     for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
 2337       x = mm->fast_out(i);
 2338       if (x->is_MemBar() && x->Opcode() == Op_MemBarVolatile) {
 2339 	trailing = x->as_MemBar();
 2340 	break;
 2341       }
 2342     }
 2343 
 2344     return trailing;
 2345   }
 2346 
 2347   // trailing_to_card_mark
 2348   //
 2349   // graph traversal helper which detects extra, non-normal Mem feed
 2350   // from a trailing volatile membar to a preceding card mark volatile
 2351   // membar i.e. it identifies whether one of the three possible extra
 2352   // GC post-write Mem flow subgraphs is present
 2353   //
 2354   // this predicate checks for the same flow as the previous predicate
 2355   // but starting from the bottom rather than the top.
 2356   //
 2357   // if the configuration is present returns the card mark membar
 2358   // otherwise NULL
 2359   //
 2360   // n.b. the supplied membar is expected to be a trailing
 2361   // MemBarVolatile i.e. the caller must ensure the input node has the
 2362   // correct opcode
 2363 
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing)
  {
    assert(trailing->Opcode() == Op_MemBarVolatile,
	   "expecting a volatile membar");
    assert(!is_card_mark_membar(trailing),
	   "not expecting a card mark membar");

    // the Mem feed to the membar should be a merge
    Node *x = trailing->in(TypeFunc::Memory);
    if (!x->is_MergeMem()) {
      return NULL;
    }

    MergeMemNode *mm = x->as_MergeMem();

    x = mm->in(Compile::AliasIdxBot);
    // with G1 we may possibly see a Phi or two before we see a Memory
    // Proj from the card mark membar

    const int MAX_PHIS = 3;	// max phis we will search through
    int phicount = 0; 		// current search count

    // if the Bot slice is already a Proj we can go straight to the
    // membar check below; otherwise walk up through the Phi chain
    bool retry_feed = !x->is_Proj();

    while (retry_feed) {
      if (UseG1GC && x->is_Phi() && phicount++ < MAX_PHIS) {
	PhiNode *phi = x->as_Phi();
	ProjNode *proj = NULL;
	PhiNode *nextphi = NULL;
	bool found_leading = false;
	// scan this Phi's inputs for (a) a Proj from a membar and/or
	// (b) a further Phi to continue the upward walk
	for (uint i = 1; i < phi->req(); i++) {
	  x = phi->in(i);
	  if (x->is_Phi()) {
	    nextphi = x->as_Phi();
	  } else if (x->is_Proj()) {
	    int opcode = x->in(0)->Opcode();
	    if (opcode == Op_MemBarVolatile) {
	      // candidate card mark membar feed
	      proj = x->as_Proj();
	    } else if (opcode == Op_MemBarRelease ||
		       opcode == Op_MemBarCPUOrder) {
	      // probably a leading membar
	      found_leading = true;
	    }
	  }
	}
	// if we found a correct looking proj then retry from there
	// otherwise we must have seen a leading membar and a phi or
	// this is the wrong config
	if (proj != NULL) {
	  x = proj;
	  retry_feed = false;
	} else if (found_leading && nextphi != NULL) {
	  // retry from this phi to check phi2
	  x = nextphi;
	} else {
	  // not what we were looking for
	  return NULL;
	}
      } else {
	// either not G1, not a Phi, or too many Phis -- give up
	return NULL;
      }
    }
    // the proj has to come from the card mark membar
    x = x->in(0);
    if (!x->is_MemBar()) {
      return NULL;
    }

    MemBarNode *card_mark_membar = x->as_MemBar();

    if (!is_card_mark_membar(card_mark_membar)) {
      return NULL;
    }

    return card_mark_membar;
  }
 2440 
 2441   // trailing_to_leading
 2442   //
 2443   // graph traversal helper which checks the Mem flow up the graph
 2444   // from a (non-card mark) trailing membar attempting to locate and
 2445   // return an associated leading membar. it first looks for a
 2446   // subgraph in the normal configuration (relying on helper
 2447   // normal_to_leading). failing that it then looks for one of the
 2448   // possible post-write card mark subgraphs linking the trailing node
 2449   // to a the card mark membar (relying on helper
 2450   // trailing_to_card_mark), and then checks that the card mark membar
 2451   // is fed by a leading membar (once again relying on auxiliary
 2452   // predicate normal_to_leading).
 2453   //
  // if the configuration is valid returns the cpuorder membar for
  // preference or when absent the release membar otherwise NULL.
 2456   //
 2457   // n.b. the input membar is expected to be either a volatile or
 2458   // acquire membar but in the former case must *not* be a card mark
 2459   // membar.
 2460 
 2461   MemBarNode *trailing_to_leading(const MemBarNode *trailing)
 2462   {
 2463     assert((trailing->Opcode() == Op_MemBarAcquire ||
 2464 	    trailing->Opcode() == Op_MemBarVolatile),
 2465 	   "expecting an acquire or volatile membar");
 2466     assert((trailing->Opcode() != Op_MemBarVolatile ||
 2467 	    !is_card_mark_membar(trailing)),
 2468 	   "not expecting a card mark membar");
 2469 
 2470     MemBarNode *leading = normal_to_leading(trailing);
 2471 
 2472     if (leading) {
 2473       return leading;
 2474     }
 2475 
 2476     // nothing more to do if this is an acquire
 2477     if (trailing->Opcode() == Op_MemBarAcquire) {
 2478       return NULL;
 2479     }
 2480 
 2481     MemBarNode *card_mark_membar = trailing_to_card_mark(trailing);
 2482 
 2483     if (!card_mark_membar) {
 2484       return NULL;
 2485     }
 2486 
 2487     return normal_to_leading(card_mark_membar);
 2488   }
 2489 
  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

// returns true if the supplied acquire membar is redundant, i.e. the
// volatile read it belongs to will be translated to an ldar<x> so no
// trailing dmb needs to be planted
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  // a volatile read derived from bytecode (or also from an inlined
  // SHA field read via LibraryCallKit::load_field_from_object)
  // manifests as a LoadX[mo_acquire] followed by an acquire membar
  // with a bogus read dependency on its preceding load. so in those
  // cases we will find the load node at the PARMS offset of the
  // acquire membar.  n.b. there may be an intervening DecodeN node.
  //
  // a volatile load derived from an inlined unsafe field access
  // manifests as a cpuorder membar with Ctl and Mem projections
  // feeding both an acquire membar and a LoadX[mo_acquire]. The
  // acquire then feeds another cpuorder membar via Ctl and Mem
  // projections. The load has no output dependency on these trailing
  // membars because subsequent nodes inserted into the graph take
  // their control feed from the final membar cpuorder meaning they
  // are all ordered after the load.

  Node *x = barrier->lookup(TypeFunc::Parms);
  if (x) {
    // we are starting from an acquire and it has a fake dependency
    //
    // need to check for
    //
    //   LoadX[mo_acquire]
    //   {  |1   }
    //   {DecodeN}
    //      |Parms
    //   MemBarAcquire*
    //
    // where * tags node we were passed
    // and |k means input k
    if (x->is_DecodeNarrowPtr()) {
      x = x->in(1);
    }

    return (x->is_Load() && x->as_Load()->is_acquire());
  }

  // now check for an unsafe volatile get

  // need to check for
  //
  //   MemBarCPUOrder
  //        ||       \\
  //   MemBarAcquire* LoadX[mo_acquire]
  //        ||
  //   MemBarCPUOrder
  //
  // where * tags node we were passed
  // and || or \\ are Ctl+Mem feeds via intermediate Proj Nodes

  // check for a parent MemBarCPUOrder
  ProjNode *ctl;
  ProjNode *mem;
  MemBarNode *parent = parent_membar(barrier);
  if (!parent || parent->Opcode() != Op_MemBarCPUOrder)
    return false;
  ctl = parent->proj_out(TypeFunc::Control);
  mem = parent->proj_out(TypeFunc::Memory);
  if (!ctl || !mem) {
    return false;
  }
  // ensure the proj nodes both feed a LoadX[mo_acquire]
  LoadNode *ld = NULL;
  for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
    x = ctl->fast_out(i);
    // if we see a load we keep hold of it and stop searching
    if (x->is_Load()) {
      ld = x->as_Load();
      break;
    }
  }
  // it must be an acquiring load
  if (ld && ld->is_acquire()) {

    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      // if we see the same load we drop it and stop searching
      if (x == ld) {
	ld = NULL;
	break;
      }
    }
    // we must have dropped the load
    if (ld == NULL) {
      // check for a child cpuorder membar
      MemBarNode *child  = child_membar(barrier->as_MemBar());
      if (child && child->Opcode() == Op_MemBarCPUOrder)
	return true;
    }
  }

  // final option for unnecessary membar is that it is a trailing node
  // belonging to a CAS

  MemBarNode *leading = trailing_to_leading(barrier->as_MemBar());

  return leading != NULL;
}
 2599 
 2600 bool needs_acquiring_load(const Node *n)
 2601 {
 2602   assert(n->is_Load(), "expecting a load");
 2603   if (UseBarriersForVolatile) {
 2604     // we use a normal load and a dmb
 2605     return false;
 2606   }
 2607 
 2608   LoadNode *ld = n->as_Load();
 2609 
 2610   if (!ld->is_acquire()) {
 2611     return false;
 2612   }
 2613 
 2614   // check if this load is feeding an acquire membar
 2615   //
 2616   //   LoadX[mo_acquire]
 2617   //   {  |1   }
 2618   //   {DecodeN}
 2619   //      |Parms
 2620   //   MemBarAcquire*
 2621   //
 2622   // where * tags node we were passed
 2623   // and |k means input k
 2624 
 2625   Node *start = ld;
 2626   Node *mbacq = NULL;
 2627 
 2628   // if we hit a DecodeNarrowPtr we reset the start node and restart
 2629   // the search through the outputs
 2630  restart:
 2631 
 2632   for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
 2633     Node *x = start->fast_out(i);
 2634     if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
 2635       mbacq = x;
 2636     } else if (!mbacq &&
 2637 	       (x->is_DecodeNarrowPtr() ||
 2638 		(x->is_Mach() && x->Opcode() == Op_DecodeN))) {
 2639       start = x;
 2640       goto restart;
 2641     }
 2642   }
 2643 
 2644   if (mbacq) {
 2645     return true;
 2646   }
 2647 
 2648   // now check for an unsafe volatile get
 2649 
 2650   // check if Ctl and Proj feed comes from a MemBarCPUOrder
 2651   //
 2652   //     MemBarCPUOrder
 2653   //        ||       \\
 2654   //   MemBarAcquire* LoadX[mo_acquire]
 2655   //        ||
 2656   //   MemBarCPUOrder
 2657 
 2658   MemBarNode *membar;
 2659 
 2660   membar = parent_membar(ld);
 2661 
 2662   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
 2663     return false;
 2664   }
 2665 
 2666   // ensure that there is a CPUOrder->Acquire->CPUOrder membar chain
 2667 
 2668   membar = child_membar(membar);
 2669 
 2670   if (!membar || !membar->Opcode() == Op_MemBarAcquire) {
 2671     return false;
 2672   }
 2673 
 2674   membar = child_membar(membar);
 2675 
 2676   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
 2677     return false;
 2678   }
 2679 
 2680   return true;
 2681 }
 2682 
// returns true if the supplied release membar is redundant, i.e. the
// volatile put it heads is in a valid subgraph and will be
// translated using stlr<x>, so no leading dmb needs to be planted
bool unnecessary_release(const Node *n)
{
  assert((n->is_MemBar() &&
	  n->Opcode() == Op_MemBarRelease),
	 "expecting a release membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  // if there is a dependent CPUOrder barrier then use that as the
  // leading

  MemBarNode *barrier = n->as_MemBar();
  // check for an intervening cpuorder membar
  MemBarNode *b = child_membar(barrier);
  if (b && b->Opcode() == Op_MemBarCPUOrder) {
    // ok, so start the check from the dependent cpuorder barrier
    barrier = b;
  }

  // must start with a normal feed
  MemBarNode *child_barrier = leading_to_normal(barrier);

  if (!child_barrier) {
    return false;
  }

  if (!is_card_mark_membar(child_barrier)) {
    // this is the trailing membar and we are done
    return true;
  }

  // must be sure this card mark feeds a trailing membar
  MemBarNode *trailing = card_mark_to_trailing(child_barrier);
  return (trailing != NULL);
}
 2721 
// returns true if the supplied trailing volatile membar is redundant,
// i.e. the volatile put it trails will be translated using stlr<x>
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode *mbvol = n->as_MemBar();

  // first we check if this is part of a card mark. if so then we have
  // to generate a StoreLoad barrier

  if (is_card_mark_membar(mbvol)) {
      return false;
  }

  // ok, if it's not a card mark then we still need to check if it is
  // a trailing membar of a volatile put graph.

  return (trailing_to_leading(mbvol) != NULL);
}
 2744 
// predicates controlling emit of str<x>/stlr<x> and associated dmbs

// returns true if the releasing store n should be translated to an
// stlr<x>, i.e. it is fed by the leading membar of a recognized
// volatile put subgraph
bool needs_releasing_store(const Node *n)
{
  // assert n->is_Store();
  if (UseBarriersForVolatile) {
    // we use a normal store and dmb combination
    return false;
  }

  StoreNode *st = n->as_Store();

  // the store must be marked as releasing
  if (!st->is_release()) {
    return false;
  }

  // the store must be fed by a membar

  Node *x = st->lookup(StoreNode::Memory);

  if (! x || !x->is_Proj()) {
    return false;
  }

  ProjNode *proj = x->as_Proj();

  x = proj->lookup(0);

  if (!x || !x->is_MemBar()) {
    return false;
  }

  MemBarNode *barrier = x->as_MemBar();

  // if the barrier is a release membar or a cpuorder membar fed by a
  // release membar then we need to check whether that forms part of a
  // volatile put graph.

  // reject invalid candidates
  if (!leading_membar(barrier)) {
    return false;
  }

  // does this lead a normal subgraph?
  MemBarNode *mbvol = leading_to_normal(barrier);

  if (!mbvol) {
    return false;
  }

  // all done unless this is a card mark
  if (!is_card_mark_membar(mbvol)) {
    return true;
  }

  // we found a card mark -- just make sure we have a trailing barrier

  return (card_mark_to_trailing(mbvol) != NULL);
}
 2805 
// predicate controlling translation of CAS
//
// returns true if CAS needs to use an acquiring load otherwise false

bool needs_acquiring_load_exclusive(const Node *n)
{
  assert(is_CAS(n->Opcode()), "expecting a compare and swap");
  if (UseBarriersForVolatile) {
    return false;
  }

  // CAS nodes only ought to turn up in inlined unsafe CAS operations
#ifdef ASSERT
  // in debug builds verify that the CAS really does sit in the
  // expected cpuorder+release ... acquire membar sandwich
  LoadStoreNode *st = n->as_LoadStore();

  // the store must be fed by a membar

  Node *x = st->lookup(StoreNode::Memory);

  assert (x && x->is_Proj(), "CAS not fed by memory proj!");

  ProjNode *proj = x->as_Proj();

  x = proj->lookup(0);

  assert (x && x->is_MemBar(), "CAS not fed by membar!");

  MemBarNode *barrier = x->as_MemBar();

  // the barrier must be a cpuorder membar fed by a release membar

  assert(barrier->Opcode() == Op_MemBarCPUOrder,
	 "CAS not fed by cpuorder membar!");

  MemBarNode *b = parent_membar(barrier);
  assert ((b != NULL && b->Opcode() == Op_MemBarRelease),
	  "CAS not fed by cpuorder+release membar pair!");

  // does this lead a normal subgraph?
  MemBarNode *mbar = leading_to_normal(barrier);

  assert(mbar != NULL, "CAS not embedded in normal graph!");

  assert(mbar->Opcode() == Op_MemBarAcquire, "trailing membar should be an acquire");
#endif // ASSERT
  // so we can just return true here
  return true;
}
 2854 
// predicate controlling translation of StoreCM
//
// returns true if the dmb ishst (StoreStore) normally planted before
// the card write can be omitted, otherwise false
// (n.b. the predicate name gives the correct polarity: true means
// the barrier is unnecessary)

bool unnecessary_storestore(const Node *storecm)
{
  assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");

  // we only ever need to generate a dmb ishst between an object put
  // and the associated card mark when we are using CMS without
  // conditional card marking

  if (!UseConcMarkSweepGC || UseCondCardMark) {
    return true;
  }

  // if we are implementing volatile puts using barriers then the
  // object put as an str so we must insert the dmb ishst

  if (UseBarriersForVolatile) {
    return false;
  }

  // we can omit the dmb ishst if this StoreCM is part of a volatile
  // put because in that case the put will be implemented by stlr
  //
  // we need to check for a normal subgraph feeding this StoreCM.
  // that means the StoreCM must be fed Memory from a leading membar,
  // either a MemBarRelease or its dependent MemBarCPUOrder, and the
  // leading membar must be part of a normal subgraph

  Node *x = storecm->in(StoreNode::Memory);

  if (!x->is_Proj()) {
    return false;
  }

  x = x->in(0);

  if (!x->is_MemBar()) {
    return false;
  }

  MemBarNode *leading = x->as_MemBar();

  // reject invalid candidates
  if (!leading_membar(leading)) {
    return false;
  }

  // we can omit the StoreStore if it is the head of a normal subgraph
  return (leading_to_normal(leading) != NULL);
}
 2909 
 2910 
 2911 #define __ _masm.
 2912 
 2913 // advance declarations for helper functions to convert register
 2914 // indices to register objects
 2915 
 2916 // the ad file has to provide implementations of certain methods
 2917 // expected by the generic code
 2918 //
 2919 // REQUIRED FUNCTIONALITY
 2920 
 2921 //=============================================================================
 2922 
 2923 // !!!!! Special hack to get all types of calls to specify the byte offset
 2924 //       from the start of the call to the point where the return address
 2925 //       will point.
 2926 
 2927 int MachCallStaticJavaNode::ret_addr_offset()
 2928 {
 2929   // call should be a simple bl
 2930   int off = 4;
 2931   return off;
 2932 }
 2933 
 2934 int MachCallDynamicJavaNode::ret_addr_offset()
 2935 {
 2936   return 16; // movz, movk, movk, bl
 2937 }
 2938 
 2939 int MachCallRuntimeNode::ret_addr_offset() {
 2940   // for generated stubs the call will be
 2941   //   far_call(addr)
 2942   // for real runtime callouts it will be six instructions
 2943   // see aarch64_enc_java_to_runtime
 2944   //   adr(rscratch2, retaddr)
 2945   //   lea(rscratch1, RuntimeAddress(addr)
 2946   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
 2947   //   blrt rscratch1
 2948   CodeBlob *cb = CodeCache::find_blob(_entry_point);
 2949   if (cb) {
 2950     return MacroAssembler::far_branch_size();
 2951   } else {
 2952     return 6 * NativeInstruction::instruction_size;
 2953   }
 2954 }
 2955 
// Indicate if the safepoint node needs the polling page as an input

// the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
// instruction itself. so we cannot plant a mov of the safepoint poll
// address followed by a load. setting this to true means the mov is
// scheduled as a prior instruction. that's better for scheduling
// anyway.

bool SafePointNode::needs_polling_address_input()
{
  return true;
}
 2969 
 2970 //=============================================================================
 2971 
#ifndef PRODUCT
// debug printout for a breakpoint node
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif

// emit a single brk instruction which traps to the debugger
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  __ brk(0);
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  // let the generic code compute the size from the emitted length
  return MachNode::size(ra_);
}
 2986 
 2987 //=============================================================================
 2988 
#ifndef PRODUCT
  // debug printout for a nop (padding) node
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif

  // emit _count nop instructions worth of padding
  void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
    MacroAssembler _masm(&cbuf);
    for (int i = 0; i < _count; i++) {
      __ nop();
    }
  }

  // size is exactly _count instructions
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
 3005 
 3006 //=============================================================================
 3007 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
 3008 
 3009 int Compile::ConstantTable::calculate_table_base_offset() const {
 3010   return 0;  // absolute addressing, no offset
 3011 }
 3012 
 3013 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
 3014 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
 3015   ShouldNotReachHere();
 3016 }
 3017 
 3018 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
 3019   // Empty encoding
 3020 }
 3021 
 3022 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
 3023   return 0;
 3024 }
 3025 
 3026 #ifndef PRODUCT
 3027 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
 3028   st->print("-- \t// MachConstantBaseNode (empty encoding)");
 3029 }
 3030 #endif
 3031 
#ifndef PRODUCT
// print the frame-building instruction sequence emit() generates
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // small frames adjust sp with an immediate then store rfp/lr;
  // larger frames push rfp/lr first and load the remaining offset
  // into rscratch1
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
#endif
 3053 
// build the method frame: patchable nop, optional stack bang, frame
// push, then mark the frame complete
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
 3089 
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

int MachPrologNode::reloc() const
{
  // the prolog emits no relocatable constants
  return 0;
}
 3100 
 3101 //=============================================================================
 3102 
#ifndef PRODUCT
// print the frame-popping instruction sequence emit() generates
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  // small frames restore lr/rfp with an immediate offset; larger
  // frames need the offset loaded into rscratch1 first
  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
#endif
 3128 
// pop the frame and, for method compilations, read the safepoint
// polling page before returning
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
 3148 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

const Pipeline * MachEpilogNode::pipeline() const {
  // use the generic pipeline class
  return MachNode::pipeline_class();
}

// This method seems to be obsolete. It is declared in machnode.hpp
// and defined in all *.ad files, but it is never called. Should we
// get rid of it?
int MachEpilogNode::safepoint_offset() const {
  assert(do_polling(), "no return for this epilog node");
  return 4;
}
 3170 
 3171 //=============================================================================
 3172 
// Figure out which register class each belongs in: rc_int, rc_float or
// rc_stack.
enum RC { rc_bad, rc_int, rc_float, rc_stack };

// map an allocated OptoReg (register half or stack slot) to its
// register class; register numbering here must agree with the
// register definitions at the top of this file
static enum RC rc_class(OptoReg::Name reg) {

  if (reg == OptoReg::Bad) {
    return rc_bad;
  }

  // we have 30 int registers * 2 halves
  // (rscratch1 and rscratch2 are omitted)

  if (reg < 60) {
    return rc_int;
  }

  // we have 32 float register * 2 halves
  if (reg < 60 + 128) {
    return rc_float;
  }

  // Between float regs & stack is the flags regs.
  assert(OptoReg::is_stack(reg), "blow up if spilling flags");

  return rc_stack;
}
 3200 
 3201 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
 3202   Compile* C = ra_->C;
 3203 
 3204   // Get registers to move.
 3205   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
 3206   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
 3207   OptoReg::Name dst_hi = ra_->get_reg_second(this);
 3208   OptoReg::Name dst_lo = ra_->get_reg_first(this);
 3209 
 3210   enum RC src_hi_rc = rc_class(src_hi);
 3211   enum RC src_lo_rc = rc_class(src_lo);
 3212   enum RC dst_hi_rc = rc_class(dst_hi);
 3213   enum RC dst_lo_rc = rc_class(dst_lo);
 3214 
 3215   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
 3216 
 3217   if (src_hi != OptoReg::Bad) {
 3218     assert((src_lo&1)==0 && src_lo+1==src_hi &&
 3219            (dst_lo&1)==0 && dst_lo+1==dst_hi,
 3220            "expected aligned-adjacent pairs");
 3221   }
 3222 
 3223   if (src_lo == dst_lo && src_hi == dst_hi) {
 3224     return 0;            // Self copy, no move.
 3225   }
 3226 
 3227   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
 3228               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
 3229   int src_offset = ra_->reg2offset(src_lo);
 3230   int dst_offset = ra_->reg2offset(dst_lo);
 3231 
 3232   if (bottom_type()->isa_vect() != NULL) {
 3233     uint ireg = ideal_reg();
 3234     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
 3235     if (cbuf) {
 3236       MacroAssembler _masm(cbuf);
 3237       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
 3238       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
 3239         // stack->stack
 3240         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
 3241         if (ireg == Op_VecD) {
 3242           __ unspill(rscratch1, true, src_offset);
 3243           __ spill(rscratch1, true, dst_offset);
 3244         } else {
 3245           __ spill_copy128(src_offset, dst_offset);
 3246         }
 3247       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
 3248         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 3249                ireg == Op_VecD ? __ T8B : __ T16B,
 3250                as_FloatRegister(Matcher::_regEncode[src_lo]));
 3251       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
 3252         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
 3253                        ireg == Op_VecD ? __ D : __ Q,
 3254                        ra_->reg2offset(dst_lo));
 3255       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
 3256         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 3257                        ireg == Op_VecD ? __ D : __ Q,
 3258                        ra_->reg2offset(src_lo));
 3259       } else {
 3260         ShouldNotReachHere();
 3261       }
 3262     }
 3263   } else if (cbuf) {
 3264     MacroAssembler _masm(cbuf);
 3265     switch (src_lo_rc) {
 3266     case rc_int:
 3267       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
 3268         if (is64) {
 3269             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
 3270                    as_Register(Matcher::_regEncode[src_lo]));
 3271         } else {
 3272             MacroAssembler _masm(cbuf);
 3273             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
 3274                     as_Register(Matcher::_regEncode[src_lo]));
 3275         }
 3276       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
 3277         if (is64) {
 3278             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 3279                      as_Register(Matcher::_regEncode[src_lo]));
 3280         } else {
 3281             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 3282                      as_Register(Matcher::_regEncode[src_lo]));
 3283         }
 3284       } else {                    // gpr --> stack spill
 3285         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 3286         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
 3287       }
 3288       break;
 3289     case rc_float:
 3290       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
 3291         if (is64) {
 3292             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
 3293                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 3294         } else {
 3295             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
 3296                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 3297         }
 3298       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
 3299           if (cbuf) {
 3300             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 3301                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 3302         } else {
 3303             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 3304                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 3305         }
 3306       } else {                    // fpr --> stack spill
 3307         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 3308         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
 3309                  is64 ? __ D : __ S, dst_offset);
 3310       }
 3311       break;
 3312     case rc_stack:
 3313       if (dst_lo_rc == rc_int) {  // stack --> gpr load
 3314         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
 3315       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
 3316         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 3317                    is64 ? __ D : __ S, src_offset);
 3318       } else {                    // stack --> stack copy
 3319         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 3320         __ unspill(rscratch1, is64, src_offset);
 3321         __ spill(rscratch1, is64, dst_offset);
 3322       }
 3323       break;
 3324     default:
 3325       assert(false, "bad rc_class for spill");
 3326       ShouldNotReachHere();
 3327     }
 3328   }
 3329 
 3330   if (st) {
 3331     st->print("spill ");
 3332     if (src_lo_rc == rc_stack) {
 3333       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
 3334     } else {
 3335       st->print("%s -> ", Matcher::regName[src_lo]);
 3336     }
 3337     if (dst_lo_rc == rc_stack) {
 3338       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
 3339     } else {
 3340       st->print("%s", Matcher::regName[dst_lo]);
 3341     }
 3342     if (bottom_type()->isa_vect() != NULL) {
 3343       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
 3344     } else {
 3345       st->print("\t# spill size = %d", is64 ? 64:32);
 3346     }
 3347   }
 3348 
 3349   return 0;
 3350 
 3351 }
 3352 
#ifndef PRODUCT
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    // delegate to implementation() in printing mode (no code buffer)
    implementation(NULL, ra_, false, st);
}
#endif

// delegate to implementation() in emitting mode
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}

uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  // variable size; let the generic code measure the emitted length
  return MachNode::size(ra_);
}
 3369 
 3370 //=============================================================================
 3371 
 3372 #ifndef PRODUCT
 3373 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 3374   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 3375   int reg = ra_->get_reg_first(this);
 3376   st->print("add %s, rsp, #%d]\t# box lock",
 3377             Matcher::regName[reg], offset);
 3378 }
 3379 #endif
 3380 
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);

  // Materialize the stack address of the lock slot into the allocated
  // register: reg = sp + offset.
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  // size() below assumes exactly one 4-byte instruction, so the offset
  // must fit an add/sub immediate; anything else is a hard failure.
  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    ShouldNotReachHere();
  }
}
 3393 
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  // emit() always produces exactly one 4-byte add instruction.
  return 4;
}
 3398 
 3399 //=============================================================================
 3400 
 3401 #ifndef PRODUCT
 3402 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 3403 {
 3404   st->print_cr("# MachUEPNode");
 3405   if (UseCompressedClassPointers) {
 3406     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 3407     if (Universe::narrow_klass_shift() != 0) {
 3408       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
 3409     }
 3410   } else {
 3411    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 3412   }
 3413   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
 3414   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
 3415 }
 3416 #endif
 3417 
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  // Inline cache check: compare the klass of the receiver (j_rarg0)
  // against the expected klass (rscratch2 — presumably loaded by the
  // caller's inline-cache code; confirm against cmp_klass), using
  // rscratch1 as a temporary.
  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  // Klass mismatch: tail-call the shared IC miss stub.
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
 3431 
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  // Defer to the generic MachNode size computation.
  return MachNode::size(ra_);
}
 3436 
 3437 // REQUIRED EMIT CODE
 3438 
 3439 //=============================================================================
 3440 
 3441 // Emit exception handler code.
// Emit exception handler code.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    // Stub section could not grow: record a bailout, return dummy offset.
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  // Jump to the exception blob; far_jump handles out-of-range targets.
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  // Offset of the handler within the stub section.
  return offset;
}
 3460 
// Emit deopt handler code.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    // Stub section could not grow: record a bailout, return dummy offset.
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // Set lr to the address of the adr instruction itself, so the unpack
  // blob sees a return address inside this deopt handler.
  // NOTE(review): confirm the unpack blob's exact return-address
  // expectation against SharedRuntime::deopt_blob().
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
 3481 
 3482 // REQUIRED MATCHER CODE
 3483 
 3484 //=============================================================================
 3485 
 3486 const bool Matcher::match_rule_supported(int opcode) {
 3487 
 3488   switch (opcode) {
 3489   default:
 3490     break;
 3491   }
 3492 
 3493   if (!has_match_rule(opcode)) {
 3494     return false;
 3495   }
 3496 
 3497   return true;  // Per default match rules are supported.
 3498 }
 3499 
 3500 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
 3501 
 3502   // TODO
 3503   // identify extra cases that we might want to provide match rules for
 3504   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
 3505   bool ret_value = match_rule_supported(opcode);
 3506   // Add rules here.
 3507 
 3508   return ret_value;  // Per default match rules are supported.
 3509 }
 3510 
// This port provides no predicated (masked) vector match rules.
const bool Matcher::has_predicated_vectors(void) {
  return false;
}

// Use the default register-pressure threshold for float registers.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}

// Unused on AArch64 (guarded by Unimplemented()).
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
 3524 
 3525 // Is this branch offset short enough that a short branch can be used?
 3526 //
 3527 // NOTE: If the platform does not provide any short branch variants, then
 3528 //       this method should return false for offset 0.
 3529 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
 3530   // The passed offset is relative to address of the branch.
 3531 
 3532   return (-32768 <= offset && offset < 32768);
 3533 }
 3534 
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  // (Any 64-bit immediate can be built with at most four movz/movk.)
  return true;
}

// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
 3545 
 3546 // Vector width in bytes.
 3547 const int Matcher::vector_width_in_bytes(BasicType bt) {
 3548   int size = MIN2(16,(int)MaxVectorSize);
 3549   // Minimum 2 values in vector
 3550   if (size < 2*type2aelembytes(bt)) size = 0;
 3551   // But never < 4
 3552   if (size < 4) size = 0;
 3553   return size;
 3554 }
 3555 
// Limits on vector size (number of elements) loaded into vector.
// Maximum element count = full vector width / element size.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
// Minimum element count, but never fewer than 2 elements.
const int Matcher::min_vector_size(const BasicType bt) {
//  For the moment limit the vector size to 8 bytes
    int size = 8 / type2aelembytes(bt);
    if (size < 2) size = 2;
    return size;
}
 3566 
 3567 // Vector ideal reg.
 3568 const uint Matcher::vector_ideal_reg(int len) {
 3569   switch(len) {
 3570     case  8: return Op_VecD;
 3571     case 16: return Op_VecX;
 3572   }
 3573   ShouldNotReachHere();
 3574   return 0;
 3575 }
 3576 
// Vector shift counts always live in a full 128-bit register.
const uint Matcher::vector_shift_count_ideal_reg(int size) {
  return Op_VecX;
}

// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  return false;
}
 3585 
// Misaligned vector loads/stores are allowed on this port unless the
// AlignVector flag demands aligned accesses.
const bool Matcher::misaligned_vectors_ok() {
  return !AlignVector; // can be changed by flag
}
 3590 
// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Use conditional move (CMOVL)
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}

const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}
 3604 
// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;

// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Complex addressing of narrow oops is only usable when they need no
  // shift to decode.
  return Universe::narrow_oop_shift() == 0;
}
 3625 
bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}

bool Matcher::const_oop_prefer_decode() {
  // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
  // (Only when there is no base to add, i.e. zero-based compressed oops.)
  return Universe::narrow_oop_base() == NULL;
}

bool Matcher::const_klass_prefer_decode() {
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
  // (Only when there is no klass base to add.)
  return Universe::narrow_klass_base() == NULL;
}
 3641 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;

// Never called on AArch64: implicit-null-check fixup is not required
// here, and Unimplemented() guards against unexpected use.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
 3659 
// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?  Yes on this port.
bool Matcher::float_in_double() { return true; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
 3673 
 3674 // Return whether or not this register is ever used as an argument.
 3675 // This function is used on startup to build the trampoline stubs in
 3676 // generateOptoStub.  Registers not mentioned will be killed by the VM
 3677 // call in the trampoline, and arguments in those registers not be
 3678 // available to the callee.
 3679 bool Matcher::can_be_java_arg(int reg)
 3680 {
 3681   return
 3682     reg ==  R0_num || reg == R0_H_num ||
 3683     reg ==  R1_num || reg == R1_H_num ||
 3684     reg ==  R2_num || reg == R2_H_num ||
 3685     reg ==  R3_num || reg == R3_H_num ||
 3686     reg ==  R4_num || reg == R4_H_num ||
 3687     reg ==  R5_num || reg == R5_H_num ||
 3688     reg ==  R6_num || reg == R6_H_num ||
 3689     reg ==  R7_num || reg == R7_H_num ||
 3690     reg ==  V0_num || reg == V0_H_num ||
 3691     reg ==  V1_num || reg == V1_H_num ||
 3692     reg ==  V2_num || reg == V2_H_num ||
 3693     reg ==  V3_num || reg == V3_H_num ||
 3694     reg ==  V4_num || reg == V4_H_num ||
 3695     reg ==  V5_num || reg == V5_H_num ||
 3696     reg ==  V6_num || reg == V6_H_num ||
 3697     reg ==  V7_num || reg == V7_H_num;
 3698 }
 3699 
// Any register that can carry a Java argument is spillable.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}

// No hand-written assembler path for long division by constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
 3708 
// Register for DIVI projection of divmodI.
// Fused div/mod nodes are not matched on this port, so none of these
// projection masks should ever be requested.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// The frame pointer register mask is used to preserve SP across
// method-handle invokes.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
 3735 
 3736 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
 3737   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
 3738     Node* u = addp->fast_out(i);
 3739     if (u->is_Mem()) {
 3740       int opsize = u->as_Mem()->memory_size();
 3741       assert(opsize > 0, "unexpected memory operand size");
 3742       if (u->as_Mem()->memory_size() != (1<<shift)) {
 3743         return false;
 3744       }
 3745     }
 3746   }
 3747   return true;
 3748 }
 3749 
const bool Matcher::convi2l_type_required = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  // Simple base+offset shapes are handled by the shared helper.
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  // Case 1: offset is (LShiftL x con) -- a scaled index.  Clone it into
  // the address only when every memory use matches the implied operand
  // size and the shift feeds nothing but address expressions.
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    // An inner ConvI2L can be subsumed too (loadStore() selects a
    // sign-extending sxtw index for the I2L address opcodes), again
    // only if it has no other users.
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    // Case 2: offset is a bare ConvI2L -- fold the extension into the
    // addressing mode as an unscaled sign-extended index.
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
 3792 
// Transform:
// (AddP base (AddP base address (LShiftL index con)) offset)
// into:
// (AddP base (AddP base offset) (LShiftL index con))
// to take full advantage of ARM's addressing modes
void Compile::reshape_address(AddPNode* addp) {
  Node *addr = addp->in(AddPNode::Address);
  // Only applies when the inner node is an AddP sharing the same base.
  if (addr->is_AddP() && addr->in(AddPNode::Base) == addp->in(AddPNode::Base)) {
    const AddPNode *addp2 = addr->as_AddP();
    // The inner offset must be a foldable scaled index (LShiftL by a
    // constant that fits all memory uses) or a ConvI2L.
    if ((addp2->in(AddPNode::Offset)->Opcode() == Op_LShiftL &&
         addp2->in(AddPNode::Offset)->in(2)->is_Con() &&
         size_fits_all_mem_uses(addp, addp2->in(AddPNode::Offset)->in(2)->get_int())) ||
        addp2->in(AddPNode::Offset)->Opcode() == Op_ConvI2L) {

      // Any use that can't embed the address computation?
      for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
        Node* u = addp->fast_out(i);
        if (!u->is_Mem()) {
          return;
        }
        // Vector accesses and card-mark stores are excluded from the
        // transform.
        if (u->is_LoadVector() || u->is_StoreVector() || u->Opcode() == Op_StoreCM) {
          return;
        }
        if (addp2->in(AddPNode::Offset)->Opcode() != Op_ConvI2L) {
          int scale = 1 << addp2->in(AddPNode::Offset)->in(2)->get_int();
          // Bail out when this CPU implementation considers the scaled
          // access expensive.
          if (VM_Version::expensive_load(u->as_Mem()->memory_size(), scale)) {
            return;
          }
        }
      }

      Node* off = addp->in(AddPNode::Offset);
      Node* addr2 = addp2->in(AddPNode::Address);
      Node* base = addp->in(AddPNode::Base);

      Node* new_addr = NULL;
      // Check whether the graph already has the new AddP we need
      // before we create one (no GVN available here).
      for (DUIterator_Fast imax, i = addr2->fast_outs(imax); i < imax; i++) {
        Node* u = addr2->fast_out(i);
        if (u->is_AddP() &&
            u->in(AddPNode::Base) == base &&
            u->in(AddPNode::Address) == addr2 &&
            u->in(AddPNode::Offset) == off) {
          new_addr = u;
          break;
        }
      }

      if (new_addr == NULL) {
        new_addr = new AddPNode(base, addr2, off);
      }
      // Swap the constant offset and the scaled index between the two
      // AddPs, then drop any node left without uses.
      Node* new_off = addp2->in(AddPNode::Offset);
      addp->set_req(AddPNode::Address, new_addr);
      if (addr->outcnt() == 0) {
        addr->disconnect_inputs(NULL, this);
      }
      addp->set_req(AddPNode::Offset, new_off);
      if (off->outcnt() == 0) {
        off->disconnect_inputs(NULL, this);
      }
    }
  }
}
 3857 
// helper for encoding java_to_runtime calls on sim
//
// this is needed to compute the extra arguments required when
// planting a call to the simulator blrt instruction. the TypeFunc
// can be queried to identify the counts for integral, and floating
// arguments and the return type

// Count the general-purpose and floating-point parameters of 'tf' into
// gpcnt/fpcnt and classify the return type into 'rtype'.
static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
{
  int gps = 0;
  int fps = 0;
  const TypeTuple *domain = tf->domain();
  int max = domain->cnt();
  for (int i = TypeFunc::Parms; i < max; i++) {
    const Type *t = domain->field_at(i);
    switch(t->basic_type()) {
    case T_FLOAT:
    case T_DOUBLE:
      fps++;
      // fall through: fp args are also counted in the gp total.
      // NOTE(review): this looks intentional for the simulator blrt
      // interface (every parameter occupies a slot) -- confirm; if not,
      // a break is missing here.
    default:
      gps++;
    }
  }
  gpcnt = gps;
  fpcnt = fps;
  // Classify the return type for the simulator's benefit.
  BasicType rt = tf->return_type();
  switch (rt) {
  case T_VOID:
    rtype = MacroAssembler::ret_type_void;
    break;
  default:
    rtype = MacroAssembler::ret_type_integral;
    break;
  case T_FLOAT:
    rtype = MacroAssembler::ret_type_float;
    break;
  case T_DOUBLE:
    rtype = MacroAssembler::ret_type_double;
    break;
  }
}
 3899 
// Emit a volatile access via INSN (a load-acquire/store-release form,
// which only supports plain base-register addressing -- hence the
// guarantees on index/disp/scale).
// NOTE(review): the SCRATCH parameter is unused in this body -- confirm
// whether callers rely on it or it can be dropped.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }

// Member-function-pointer types used by the loadStore() helpers below
// to dispatch to the right MacroAssembler load/store emitter.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
 3913 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  static void loadStore(MacroAssembler masm, mem_insn insn,
                         Register reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
    case INDINDEXI2L:
    case INDINDEXI2LN:
      // Index was narrowed from int: use a sign-extending (sxtw) form.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // No index register: base + displacement addressing.
      (masm.*insn)(reg, Address(base, disp));
    } else {
      // Register-offset addressing carries no displacement.
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
 3944 
  // Float-register variant of loadStore() above; same addressing-mode
  // selection, but only the scaled I2L opcodes need sign extension.
  static void loadStore(MacroAssembler masm, mem_float_insn insn,
                         FloatRegister reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      // Index was narrowed from int: use a sign-extending (sxtw) form.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

     if (index == -1) {
      // No index register: base + displacement addressing.
      (masm.*insn)(reg, Address(base, disp));
    } else {
      // Register-offset addressing carries no displacement.
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
 3967 
  // SIMD/vector variant of loadStore(); vector accesses only support
  // lsl-scaled register offsets or base + displacement.
  static void loadStore(MacroAssembler masm, mem_vector_insn insn,
                         FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                         int opcode, Register base, int index, int size, int disp)
  {
    if (index == -1) {
      (masm.*insn)(reg, T, Address(base, disp));
    } else {
      // Register-offset addressing carries no displacement.
      assert(disp == 0, "unsupported address mode");
      (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
 3979 
 3980 %}
 3981 
 3982 
 3983 
 3984 //----------ENCODING BLOCK-----------------------------------------------------
 3985 // This block specifies the encoding classes used by the compiler to
 3986 // output byte streams.  Encoding classes are parameterized macros
 3987 // used by Machine Instruction Nodes in order to generate the bit
 3988 // encoding of the instruction.  Operands specify their base encoding
 3989 // interface with the interface keyword.  There are currently
 3990 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
 3991 // COND_INTER.  REG_INTER causes an operand to generate a function
 3992 // which returns its register number when queried.  CONST_INTER causes
 3993 // an operand to generate a function which returns the value of the
 3994 // constant when queried.  MEMORY_INTER causes an operand to generate
 3995 // four functions which return the Base Register, the Index Register,
 3996 // the Scale Value, and the Offset Value of the operand when queried.
 3997 // COND_INTER causes an operand to generate six functions which return
 3998 // the encoding code (ie - encoding bits for the instruction)
 3999 // associated with each basic boolean condition for a conditional
 4000 // instruction.
 4001 //
 4002 // Instructions specify two basic values for encoding.  Again, a
 4003 // function is available to check if the constant displacement is an
 4004 // oop. They use the ins_encode keyword to specify their encoding
 4005 // classes (which must be a sequence of enc_class names, and their
 4006 // parameters, specified in the encoding block), and they use the
 4007 // opcode keyword to specify, in order, their primary, secondary, and
 4008 // tertiary opcode.  Only the opcode sections which a particular
 4009 // instruction needs for encoding need to be specified.
 4010 encode %{
 4011   // Build emit functions for each basic byte or larger field in the
 4012   // intel encoding scheme (opcode, rm, sib, immediate), and call them
 4013   // from C++ code in the enc_class source block.  Emit functions will
 4014   // live in the main source block for now.  In future, we can
 4015   // generalize this by adding a syntax that specifies the sizes of
 4016   // fields in an order, so that the adlc can build the emit functions
 4017   // automagically
 4018 
  // catch all for unimplemented encodings
  // Emits code that reports an unimplemented C2 encoding at run time.
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
 4024 
  // BEGIN Non-volatile memory access

  // Plain (non-volatile) loads.  Each enc_class fetches the destination
  // register and defers addressing-mode selection to the loadStore()
  // helper in the source block.  ADLC distinguishes identically-named
  // enc_classes (e.g. the two aarch64_enc_ldrb variants) by their
  // operand signatures.

  // ldrsbw: load byte, sign-extend to 32 bits.
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrsb: load byte, sign-extend to 64 bits.
  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrb: load byte, zero-extend.
  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrshw: load halfword, sign-extend to 32 bits.
  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrsh: load halfword, sign-extend to 64 bits.
  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrh: load halfword, zero-extend.
  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrw: load 32-bit word.
  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrsw: load 32-bit word, sign-extend to 64 bits.
  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldr: load 64-bit doubleword.
  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrs: load 32-bit float.
  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrd: load 64-bit double.
  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // SIMD loads: 32-bit (S), 64-bit (D) and 128-bit (Q) vector variants.
  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
 4128 
  // Plain (non-volatile) stores.  The *0 variants store the zero
  // register (zr) instead of a source operand.

  // strb: store low byte.
  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Zero-byte store preceded by a StoreStore barrier, for uses that
  // need the store ordered after earlier stores.
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // strh: store low halfword.
  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // strw: store 32-bit word.
  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // str: store 64-bit doubleword.
  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      // sp cannot be the source of a str; copy it through rscratch2.
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // strs: store 32-bit float.
  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
 4197 
 4198   enc_class aarch64_enc_strd(vRegD src, memory mem) %{
 4199     FloatRegister src_reg = as_FloatRegister($src$$reg);
 4200     loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
 4201                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
 4202   %}
 4203 
 4204   enc_class aarch64_enc_strvS(vecD src, memory mem) %{
 4205     FloatRegister src_reg = as_FloatRegister($src$$reg);
 4206     loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
 4207        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
 4208   %}
 4209 
 4210   enc_class aarch64_enc_strvD(vecD src, memory mem) %{
 4211     FloatRegister src_reg = as_FloatRegister($src$$reg);
 4212     loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
 4213        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
 4214   %}
 4215 
 4216   enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
 4217     FloatRegister src_reg = as_FloatRegister($src$$reg);
 4218     loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
 4219        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
 4220   %}
 4221 
 4222   // END Non-volatile memory access
 4223 
 4224   // volatile loads and stores
 4225 
  // Store-release encodings: stlrb/stlrh/stlrw give release semantics
  // on the store.  MOV_VOLATILE (macro defined earlier in this file)
  // materializes the effective address into rscratch1 when needed,
  // since the acquire/release forms take only a base register.

  // Store-release byte.
  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  // Store-release halfword.
  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  // Store-release 32-bit word.
  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}


  // Load-acquire encodings.  ldar* have no sign-extending forms, so
  // the signed variants follow the acquiring load with an explicit
  // sign-extend of the destination register.

  // Load-acquire byte, sign-extended to 32 bits.
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // Load-acquire byte, sign-extended to 64 bits.
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  // Load-acquire byte, zero-extended (32-bit destination).
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Load-acquire byte, zero-extended (64-bit destination).
  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Load-acquire halfword, sign-extended to 32 bits.
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // Load-acquire halfword, sign-extended to 64 bits.
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // Load-acquire halfword, zero-extended (32-bit destination).
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Load-acquire halfword, zero-extended (64-bit destination).
  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Load-acquire 32-bit word.
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // NOTE(review): same enc_class name as the iRegI variant above with
  // an identical body; appears benign, but confirm how ADLC handles
  // the repeated name before relying on either variant by type.
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // Load-acquire 64-bit doubleword.
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // Load-acquire float: acquire into rscratch1, then move to FP reg.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // Load-acquire double: acquire into rscratch1, then move to FP reg.
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
 4316 
 4317   enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
 4318     Register src_reg = as_Register($src$$reg);
 4319     // we sometimes get asked to store the stack pointer into the
 4320     // current thread -- we cannot do that directly on AArch64
 4321     if (src_reg == r31_sp) {
 4322         MacroAssembler _masm(&cbuf);
 4323       assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
 4324       __ mov(rscratch2, sp);
 4325       src_reg = rscratch2;
 4326     }
 4327     MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
 4328                  rscratch1, stlr);
 4329   %}
 4330 
  // Store-release float: move FP value to rscratch2 first, then do a
  // releasing integer store.  The inner braces end the scope of _masm
  // before MOV_VOLATILE declares its own assembler.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // Store-release double: same pattern as fstlrs, 64-bit width.
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
 4350 
 4351   // synchronized read/update encodings
 4352 
  // Load-acquire-exclusive of a 64-bit doubleword.  ldaxr accepts only
  // a base register, so any index/displacement is folded into
  // rscratch1 with lea before the exclusive load.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // base + disp + (index << scale): needs two leas since Address
        // cannot express all three components at once.
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
 4381 
  // Store-release-exclusive of a 64-bit doubleword, paired with
  // aarch64_enc_ldaxr.  The exclusive-store status lands in rscratch1
  // (0 on success) and is compared against zr so that following
  // branches can test the flags.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // base + disp + (index << scale): composed with two leas.
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    // Set flags: EQ iff the exclusive store succeeded.
    __ cmpw(rscratch1, zr);
  %}
 4411 
  // Compare-and-swap encodings.  All require a base-register-only
  // address (guaranteed below) and delegate to MacroAssembler::cmpxchg
  // with the operand size; plain variants use release-only ordering,
  // the _acq variants below add acquire ordering.

  // 64-bit CAS, release-only.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit CAS, release-only.
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 16-bit CAS, release-only.
  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 8-bit CAS, release-only.
  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}


  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.

  // 64-bit CAS, acquire + release.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit CAS, acquire + release.
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 4464 
 4465 
 4466   // auxiliary used for CompareAndSwapX to set result register
  // Materialize the EQ flag (set by a preceding cmpxchg) as 0/1 in the
  // result register of a CompareAndSwapX node.
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}
 4472 
 4473   // prefetch encodings
 4474 
 4475   enc_class aarch64_enc_prefetchw(memory mem) %{
 4476     MacroAssembler _masm(&cbuf);
 4477     Register base = as_Register($mem$$base);
 4478     int index = $mem$$index;
 4479     int scale = $mem$$scale;
 4480     int disp = $mem$$disp;
 4481     if (index == -1) {
 4482       __ prfm(Address(base, disp), PSTL1KEEP);
 4483     } else {
 4484       Register index_reg = as_Register(index);
 4485       if (disp == 0) {
 4486         __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
 4487       } else {
 4488         __ lea(rscratch1, Address(base, disp));
 4489 	__ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
 4490       }
 4491     }
 4492   %}
 4493 
  // mov encodings
 4495 
  // Load a 32-bit immediate; zero goes through zr to get a canonical
  // zeroing form.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    MacroAssembler _masm(&cbuf);
    u_int32_t con = (u_int32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // Load a 64-bit immediate; zero goes through zr.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    u_int64_t con = (u_int64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}

  // Load a pointer constant, choosing the encoding by relocation type:
  // oops and metadata get relocated moves; plain addresses below the
  // first page are emitted as immediates, others via adrp+add.
  // NULL and (address)1 are handled by dedicated enc_classes below,
  // hence the ShouldNotReachHere here.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}

  // Load the NULL pointer constant.
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Load the pointer constant 1.
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (u_int64_t)1);
  %}

  // Load the safepoint polling page address via a page-aligned adrp.
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}

  // Load the card-table byte map base.
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}

  // Load a narrow (compressed) oop constant.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // Load the narrow-oop NULL constant.
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Load a narrow (compressed) klass constant.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
 4600 
 4601   // arithmetic encodings
 4602 
  // Shared add/subtract-immediate encoding, 32-bit.  The instruct's
  // $primary selector distinguishes add (0) from subtract (1); the
  // constant is negated for subtract and the sign then picks the
  // add/sub opcode so the emitted immediate is always non-negative.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // Shared add/subtract-immediate encoding, 64-bit; same $primary
  // convention as the 32-bit form above.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
 4630 
 4631   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
 4632     MacroAssembler _masm(&cbuf);
 4633    Register dst_reg = as_Register($dst$$reg);
 4634    Register src1_reg = as_Register($src1$$reg);
 4635    Register src2_reg = as_Register($src2$$reg);
 4636     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
 4637   %}
 4638 
 4639   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
 4640     MacroAssembler _masm(&cbuf);
 4641    Register dst_reg = as_Register($dst$$reg);
 4642    Register src1_reg = as_Register($src1$$reg);
 4643    Register src2_reg = as_Register($src2$$reg);
 4644     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
 4645   %}
 4646 
 4647   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
 4648     MacroAssembler _masm(&cbuf);
 4649    Register dst_reg = as_Register($dst$$reg);
 4650    Register src1_reg = as_Register($src1$$reg);
 4651    Register src2_reg = as_Register($src2$$reg);
 4652     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
 4653   %}
 4654 
 4655   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
 4656     MacroAssembler _masm(&cbuf);
 4657    Register dst_reg = as_Register($dst$$reg);
 4658    Register src1_reg = as_Register($src1$$reg);
 4659    Register src2_reg = as_Register($src2$$reg);
 4660     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
 4661   %}
 4662 
 4663   // compare instruction encodings
 4664 
  // 32-bit register-register compare.
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare with an add/sub-encodable immediate; emits
  // subs/adds against zr so only the flags are affected.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare with an arbitrary immediate, staged via rscratch1.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int32_t val = (u_int32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  // 64-bit register-register compare.
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare with a 12-bit add/sub immediate.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
      // aargh, Long.MIN_VALUE is a special case: val == -val only for
      // MIN_VALUE, whose negation is not representable, so stage it.
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare with an arbitrary immediate, staged via rscratch1.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int64_t val = (u_int64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}

  // Pointer compare (64-bit).
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // Narrow-oop compare (32-bit).
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // Pointer null test.
  enc_class aarch64_enc_testp(iRegP src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // Narrow-oop null test.
  enc_class aarch64_enc_testn(iRegN src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
 4746 
  // Unconditional branch to a label.
  enc_class aarch64_enc_b(label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // Conditional branch; the condition code comes from the cmpOp
  // operand's $cmpcode.
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Unsigned-condition variant; body identical, the cmpOpU operand
  // supplies the unsigned condition encoding.
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
 4764 
  // Slow-path subtype check via check_klass_subtype_slow_path.
  // $primary distinguishes the two instruct users: when set, the
  // result register is zeroed on the hit path before binding miss.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
 4782 
  // Static/opt-virtual Java call.  Runtime wrappers (no _method) get a
  // plain runtime-call trampoline; real Java targets get a relocated
  // trampoline call plus a to-interpreter stub.  Emission failures
  // (trampoline or stub == NULL) bail out with a CodeCache-full
  // failure so C2 can retry or deoptimize.
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
 4809 
  // Dynamic (inline-cache) Java call; bails out on CodeCache-full.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
 4819 
  // Post-call epilogue; the VerifyStackAtCalls check is not
  // implemented on this port (call_Unimplemented traps if enabled).
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
 4827 
  // Call from compiled Java code to the runtime.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target is inside the code cache: reachable trampoline call.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Out-of-code-cache target: indirect call via blrt, with the
      // return address pushed on the stack first.
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      __ add(sp, sp, 2 * wordSize);
    }
  %}
 4858 
  // Jump to the rethrow stub (far_jump: target may be out of branch
  // range).
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}

  // Method return.
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}

  // Tail call: indirect jump to the target method.
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}

  // Tail jump used for exception forwarding.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
 4884 
 4885   enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
 4886     MacroAssembler _masm(&cbuf);
 4887     Register oop = as_Register($object$$reg);
 4888     Register box = as_Register($box$$reg);
 4889     Register disp_hdr = as_Register($tmp$$reg);
 4890     Register tmp = as_Register($tmp2$$reg);
 4891     Label cont;
 4892     Label object_has_monitor;
 4893     Label cas_failed;
 4894 
 4895     assert_different_registers(oop, box, tmp, disp_hdr);
 4896 
 4897     // Load markOop from object into displaced_header.
 4898     __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
 4899 
 4900     // Always do locking in runtime.
 4901     if (EmitSync & 0x01) {
 4902       __ cmp(oop, zr);
 4903       return;
 4904     }
 4905 
 4906     if (UseBiasedLocking && !UseOptoBiasInlining) {
 4907       __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
 4908     }
 4909 
 4910     // Handle existing monitor
 4911     if ((EmitSync & 0x02) == 0) {
 4912       // we can use AArch64's bit test and branch here but
 4913       // markoopDesc does not define a bit index just the bit value
 4914       // so assert in case the bit pos changes
 4915 #     define __monitor_value_log2 1
 4916       assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
 4917       __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
 4918 #     undef __monitor_value_log2
 4919     }
 4920 
 4921     // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
 4922     __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);
 4923 
 4924     // Load Compare Value application register.
 4925 
 4926     // Initialize the box. (Must happen before we update the object mark!)
 4927     __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
 4928 
 4929     // Compare object markOop with mark and if equal exchange scratch1
 4930     // with object markOop.
 4931     if (UseLSE) {
 4932       __ mov(tmp, disp_hdr);
 4933       __ casal(Assembler::xword, tmp, box, oop);
 4934       __ cmp(tmp, disp_hdr);
 4935       __ br(Assembler::EQ, cont);
 4936     } else {
 4937       Label retry_load;
 4938       if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
 4939         __ prfm(Address(oop), PSTL1STRM);
 4940       __ bind(retry_load);
 4941       __ ldaxr(tmp, oop);
 4942       __ cmp(tmp, disp_hdr);
 4943       __ br(Assembler::NE, cas_failed);
 4944       // use stlxr to ensure update is immediately visible
 4945       __ stlxr(tmp, box, oop);
 4946       __ cbzw(tmp, cont);
 4947       __ b(retry_load);
 4948     }
 4949 
 4950     // Formerly:
 4951     // __ cmpxchgptr(/*oldv=*/disp_hdr,
 4952     //               /*newv=*/box,
 4953     //               /*addr=*/oop,
 4954     //               /*tmp=*/tmp,
 4955     //               cont,
 4956     //               /*fail*/NULL);
 4957 
 4958     assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
 4959 
 4960     // If the compare-and-exchange succeeded, then we found an unlocked
 4961     // object, will have now locked it will continue at label cont
 4962 
 4963     __ bind(cas_failed);
 4964     // We did not see an unlocked object so try the fast recursive case.
 4965 
 4966     // Check if the owner is self by comparing the value in the
 4967     // markOop of object (disp_hdr) with the stack pointer.
 4968     __ mov(rscratch1, sp);
 4969     __ sub(disp_hdr, disp_hdr, rscratch1);
 4970     __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
 4971     // If condition is true we are cont and hence we can store 0 as the
 4972     // displaced header in the box, which indicates that it is a recursive lock.
 4973     __ ands(tmp/*==0?*/, disp_hdr, tmp);
 4974     __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
 4975 
 4976     // Handle existing monitor.
 4977     if ((EmitSync & 0x02) == 0) {
 4978       __ b(cont);
 4979 
 4980       __ bind(object_has_monitor);
 4981       // The object's monitor m is unlocked iff m->owner == NULL,
 4982       // otherwise m->owner may contain a thread or a stack address.
 4983       //
 4984       // Try to CAS m->owner from NULL to current thread.
 4985       __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
 4986       __ mov(disp_hdr, zr);
 4987 
 4988       if (UseLSE) {
 4989         __ mov(rscratch1, disp_hdr);
 4990         __ casal(Assembler::xword, rscratch1, rthread, tmp);
 4991         __ cmp(rscratch1, disp_hdr);
 4992       } else {
 4993         Label retry_load, fail;
 4994         if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
 4995           __ prfm(Address(tmp), PSTL1STRM);
 4996         __ bind(retry_load);
 4997         __ ldaxr(rscratch1, tmp);
 4998         __ cmp(disp_hdr, rscratch1);
 4999         __ br(Assembler::NE, fail);
 5000         // use stlxr to ensure update is immediately visible
 5001         __ stlxr(rscratch1, rthread, tmp);
 5002         __ cbnzw(rscratch1, retry_load);
 5003         __ bind(fail);
 5004       }
 5005 
 5006       // Label next;
 5007       // __ cmpxchgptr(/*oldv=*/disp_hdr,
 5008       //               /*newv=*/rthread,
 5009       //               /*addr=*/tmp,
 5010       //               /*tmp=*/rscratch1,
 5011       //               /*succeed*/next,
 5012       //               /*fail*/NULL);
 5013       // __ bind(next);
 5014 
 5015       // store a non-null value into the box.
 5016       __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));
 5017 
 5018       // PPC port checks the following invariants
 5019       // #ifdef ASSERT
 5020       // bne(flag, cont);
 5021       // We have acquired the monitor, check some invariants.
 5022       // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
 5023       // Invariant 1: _recursions should be 0.
 5024       // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
 5025       // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
 5026       //                        "monitor->_recursions should be 0", -1);
 5027       // Invariant 2: OwnerIsThread shouldn't be 0.
 5028       // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
 5029       //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
 5030       //                           "monitor->OwnerIsThread shouldn't be 0", -1);
 5031       // #endif
 5032     }
 5033 
 5034     __ bind(cont);
 5035     // flag == EQ indicates success
 5036     // flag == NE indicates failure
 5037 
 5038   %}
 5039 
  // TODO
  // reimplement this with custom cmpxchgptr code
  // which avoids some of the unnecessary branching
  //
  // Fast-path monitor exit.  Emits the inline unlock sequence for a
  // synchronized region.  Control always falls through to (or branches
  // to) label cont, where the condition flags report the outcome:
  // EQ = unlocked inline, NE = caller must take the runtime slow path.
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);      // object being unlocked
    Register box = as_Register($box$$reg);         // on-stack BasicLock
    Register disp_hdr = as_Register($tmp$$reg);    // displaced mark word
    Register tmp = as_Register($tmp2$$reg);        // scratch
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      __ cmp(oop, zr); // Oop can't be 0 here => always false.
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
    }

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      // CAS the mark word back from the box address to the saved
      // displaced header.  Release semantics only (casl / stlxr):
      // no acquire is needed on the unlock path.
      if (UseLSE) {
        __ mov(tmp, box);
        __ casl(Assembler::xword, tmp, disp_hdr, oop);
        __ cmp(tmp, box);
      } else {
        Label retry_load;
        if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
          __ prfm(Address(oop), PSTL1STRM);
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        __ cbzw(tmp, cont);
        __ b(retry_load);
      }

    // Formerly implemented with the cmpxchgptr helper:
    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);
    // CAS failed: mark word did not hold the box address, so the lock
    // is inflated (or contended); fall through to the monitor path with
    // flags == NE unless the monitor check below succeeds.

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      // tmp holds the mark word; strip the tag to get the monitor address.
      __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
      __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
      __ cmp(rscratch1, zr);
      // Not owner, or recursions != 0: take the slow path (flags == NE).
      __ br(Assembler::NE, cont);

      // If either EntryList or cxq is non-empty a waiter may need to be
      // woken; defer to the runtime (flags == NE from the cmp above).
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
      __ cmp(rscratch1, zr);
      __ cbnz(rscratch1, cont);
      // need a release store here
      __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ stlr(rscratch1, tmp); // rscratch1 is zero
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
 5138 
 5139 %}
 5140 
 5141 //----------FRAME--------------------------------------------------------------
 5142 // Definition of frame structure and management information.
 5143 //
 5144 //  S T A C K   L A Y O U T    Allocators stack-slot number
 5145 //                             |   (to get allocators register number
 5146 //  G  Owned by    |        |  v    add OptoReg::stack0())
 5147 //  r   CALLER     |        |
 5148 //  o     |        +--------+      pad to even-align allocators stack-slot
 5149 //  w     V        |  pad0  |        numbers; owned by CALLER
 5150 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
 5151 //  h     ^        |   in   |  5
 5152 //        |        |  args  |  4   Holes in incoming args owned by SELF
 5153 //  |     |        |        |  3
 5154 //  |     |        +--------+
 5155 //  V     |        | old out|      Empty on Intel, window on Sparc
 5156 //        |    old |preserve|      Must be even aligned.
 5157 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
 5158 //        |        |   in   |  3   area for Intel ret address
 5159 //     Owned by    |preserve|      Empty on Sparc.
 5160 //       SELF      +--------+
 5161 //        |        |  pad2  |  2   pad to align old SP
 5162 //        |        +--------+  1
 5163 //        |        | locks  |  0
 5164 //        |        +--------+----> OptoReg::stack0(), even aligned
 5165 //        |        |  pad1  | 11   pad to align new SP
 5166 //        |        +--------+
 5167 //        |        |        | 10
 5168 //        |        | spills |  9   spills
 5169 //        V        |        |  8   (pad0 slot for callee)
 5170 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
 5171 //        ^        |  out   |  7
 5172 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
 5173 //     Owned by    +--------+
 5174 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
 5175 //        |    new |preserve|      Must be even-aligned.
 5176 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
 5177 //        |        |        |
 5178 //
 5179 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
 5180 //         known from SELF's arguments and the Java calling convention.
 5181 //         Region 6-7 is determined per call site.
 5182 // Note 2: If the calling convention leaves holes in the incoming argument
 5183 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
 5185 //         incoming area, as the Java calling convention is completely under
 5186 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
 5188 //         varargs C calling conventions.
 5189 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
 5190 //         even aligned with pad0 as needed.
 5191 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
 5192 //           (the latter is true on Intel but is it false on AArch64?)
 5193 //         region 6-11 is even aligned; it may be padded out more so that
 5194 //         the region from SP to FP meets the minimum stack alignment.
 5195 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
 5196 //         alignment.  Region 11, pad1, may be dynamically extended so that
 5197 //         SP meets the minimum alignment.
 5198 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // lo[] / hi[] give the register numbers for the low and high halves
    // of the return value, indexed by ideal register type; OptoReg::Bad
    // marks types that occupy a single register.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                       // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
 5302 
//----------ATTRIBUTES---------------------------------------------------------
// These declare per-operand / per-instruction attributes and the default
// values instructions inherit unless they override them.
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
 5320 
 5321 //----------OPERANDS-----------------------------------------------------------
 5322 // Operand definitions must precede instruction definitions for correct parsing
 5323 // in the ADLC because operands constitute user defined types which are used in
 5324 // instruction definitions.
 5325 
 5326 //----------Simple Operands----------------------------------------------------
 5327 
// Integer operands 32 bit
// 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift values for add/sub extension shift
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer no larger than 4 (may be negative, unlike immIExt)
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The specific constants below match shift distances and bit counts
// required by individual matcher rules.

// 32 bit constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 63
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 255 (0xFF)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 65535 (0xFFFF)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 5502 
// 64 bit constant 255 (0xFF)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 65535 (0xFFFF)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 4294967295 (0xFFFFFFFF)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Contiguous low-order bit mask: value+1 is a power of two and the top
// two bits are clear, i.e. masks of the form (1 << k) - 1 with k <= 62.
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit analogue of immL_bitmask: masks (1 << k) - 1 with k <= 30.
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 5554 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- long variant of immIU12
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 5608 
// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// As above, restricted with size shift 2 (intended for 4 byte accesses)
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// As above, restricted with size shift 3 (intended for 8 byte accesses)
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// As above, restricted with size shift 4 (intended for 16 byte accesses)
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long variants of the offset operands above
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset, size shift 2 (4 byte accesses)
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset, size shift 3 (8 byte accesses)
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset, size shift 4 (16 byte accesses)
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 5689 
// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer operands 64 bit
// 64 bit immediate
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant equal to the byte offset of last_Java_pc within the
// thread's frame anchor (frame_anchor_offset + last_Java_pc_offset).

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 5798 
// Pointer operands
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableModRef) &&
            (jbyte*)n->get_ptr() == ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
// NOTE(review): comment duplicated from immP_M1 above; confirm against
// the rules that use -2 whether both sentinels truly serve the same role.
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 5880 
// Float and Double operands
// Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double immediate that Assembler::operand_valid_for_float_immediate
// accepts, i.e. one encodable directly in an FP move-immediate.
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float immediate encodable in an FP move-immediate (checked via the
// double-valued Assembler::operand_valid_for_float_immediate).
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 5972 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5994 
// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
// NOTE(review): unlike its sibling operands this definition carries no
// op_cost(0) clause, so ADLC's default operand cost applies — confirm
// this asymmetry is intended.
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  format %{ %}
  interface(REG_INTER);
%}
 6016 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 6049 
// Fixed-register pointer operands: each one constrains allocation to a
// single register, for use where the calling convention or runtime pins
// a value to that register.

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 6133 
// Fixed-register long operands: allocation constrained to one register.

// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 6177 
// Pointer 64 bit Register FP only (the frame pointer register)
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 6188 
// Fixed-register 32-bit int operands: allocation constrained to one register.

// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 6233 
 6234 
// Narrow Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow pointer in R0 only
operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow pointer in R2 only
operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow pointer in R3 only
operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 6283 
// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 6294 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64-bit vector register operand
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128-bit vector register operand
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 6338 
// Fixed-register FP/SIMD operands: allocation constrained to one
// vector register.

operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 6374 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// for floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
 6414 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 6456 
//----------Memory Operands----------------------------------------------------
// n.b. in the MEMORY_INTER descriptions below, index(0xffffffff) is the
// ADLC convention for "no index register".

// [base] -- plain register-indirect addressing
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// [base, wN sxtw #scale] -- scaled 32-bit index, sign-extended to 64 bits.
// The predicate only allows this form when the scale suits every memory
// use of the address expression.
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// [base, xN lsl #scale] -- scaled 64-bit index, same scale restriction
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [base, wN sxtw] -- unscaled 32-bit index, sign-extended to 64 bits
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [base, xN] -- unscaled 64-bit index
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
 6530 
// [base, #imm] -- base plus immediate offset operands. The immIOffsetN /
// immLoffsetN variants restrict the offset to values legal for an N-byte
// access, so the matcher can pick an operand that fits the access size.

operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 6642 
// Narrow-oop memory operands: addresses formed from a DecodeN of a
// compressed pointer. All are guarded by narrow_oop_shift() == 0, i.e.
// they only apply when decoding is a no-op (no shift required).

operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 6747 
 6748 
 6749 
// AArch64 opto stubs need to write to the pc slot in the thread anchor
// (thread register base plus the fixed pc-slot offset immL_pc_off)
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 6764 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
// All variants address [SP + stack offset]; base 0x1e is the SP encoding.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
 6839 
// Operands for expressing Control Flow
// NOTE: Label is a predefined operand which should not be redefined in
//       the AD file. It is generically handled within the ADLC.

//----------Conditional Branch Operands----------------------------------------
// Comparison Op  - This is the operation of the comparison, and is limited to
//                  the following set of codes:
//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.

// used for signed integral comparisons and fp comparisons
// (the numeric values are the AArch64 condition-code encodings)

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons
// (lo/hs/ls/hi are the unsigned condition codes)

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 6895 
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions
// (restricted by predicate to eq/ne tests only)

operand cmpOpEqNe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions
// (restricted by predicate to lt/ge tests only)

operand cmpOpLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions
// (restricted by predicate to eq/ne/lt/ge tests only)

operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 6971 
 6972 // Special operand allowing long args to int ops to be truncated for free
 6973 
 6974 operand iRegL2I(iRegL reg) %{
 6975 
 6976   op_cost(0);
 6977 
 6978   match(ConvL2I reg);
 6979 
 6980   format %{ "l2i($reg)" %}
 6981 
 6982   interface(REG_INTER)
 6983 %}
 6984 
// vector memory operand classes, one per access size (4/8/16 bytes):
// indirect or register-indexed, plus immediate offsets legal for that size
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
 6988 
//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used as to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.

// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address

opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but its not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
 7016 
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architectures pipeline.

// For specific pipelines, eg A53, define the stages of that pipeline
//pipe_desc(ISS, EX1, EX2, WR);
// map the A53-style stage names onto the generic S0..S3 stages declared
// by pipe_desc below
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
 7026 
 7027 // Integer ALU reg operation
 7028 pipeline %{
 7029 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
 7042 
// We don't use an actual pipeline model so don't care about resources
// or description. we do use pipeline classes to introduce fixed
// latencies

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);
 7063 
//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.
//
// The FP classes below all model a single NEON/FP instruction that reads
// its sources early (S1/S2) and writes its destination in S5.

// FP dyadic op, single precision
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP dyadic op, double precision
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, single precision
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, double precision
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double -> float
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert float -> double
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert float -> int
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert float -> long
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert int -> float
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert long -> float
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double -> int
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double -> long
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
 7177 
 7178 pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
 7179 %{
 7180   single_instruction;
 7181   src    : S1(read);
 7182   dst    : S5(write);
 7183   INS01  : ISS;
 7184   NEON_FP : S5;
 7185 %}
 7186 
 7187 pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
 7188 %{
 7189   single_instruction;
 7190   src    : S1(read);
 7191   dst    : S5(write);
 7192   INS01  : ISS;
 7193   NEON_FP : S5;
 7194 %}
 7195 
 7196 pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
 7197 %{
 7198   single_instruction;
 7199   src1   : S1(read);
 7200   src2   : S2(read);
 7201   dst    : S5(write);
 7202   INS0   : ISS;
 7203   NEON_FP : S5;
 7204 %}
 7205 
 7206 pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
 7207 %{
 7208   single_instruction;
 7209   src1   : S1(read);
 7210   src2   : S2(read);
 7211   dst    : S5(write);
 7212   INS0   : ISS;
 7213   NEON_FP : S5;
 7214 %}
 7215 
 7216 pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
 7217 %{
 7218   single_instruction;
 7219   cr     : S1(read);
 7220   src1   : S1(read);
 7221   src2   : S1(read);
 7222   dst    : S3(write);
 7223   INS01  : ISS;
 7224   NEON_FP : S3;
 7225 %}
 7226 
 7227 pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
 7228 %{
 7229   single_instruction;
 7230   cr     : S1(read);
 7231   src1   : S1(read);
 7232   src2   : S1(read);
 7233   dst    : S3(write);
 7234   INS01  : ISS;
 7235   NEON_FP : S3;
 7236 %}
 7237 
 7238 pipe_class fp_imm_s(vRegF dst)
 7239 %{
 7240   single_instruction;
 7241   dst    : S3(write);
 7242   INS01  : ISS;
 7243   NEON_FP : S3;
 7244 %}
 7245 
 7246 pipe_class fp_imm_d(vRegD dst)
 7247 %{
 7248   single_instruction;
 7249   dst    : S3(write);
 7250   INS01  : ISS;
 7251   NEON_FP : S3;
 7252 %}
 7253 
 7254 pipe_class fp_load_constant_s(vRegF dst)
 7255 %{
 7256   single_instruction;
 7257   dst    : S4(write);
 7258   INS01  : ISS;
 7259   NEON_FP : S4;
 7260 %}
 7261 
 7262 pipe_class fp_load_constant_d(vRegD dst)
 7263 %{
 7264   single_instruction;
 7265   dst    : S4(write);
 7266   INS01  : ISS;
 7267   NEON_FP : S4;
 7268 %}
 7269 
// Vector multiply, 64-bit (vecD): issues on either port.
pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply, 128-bit (vecX): restricted to issue port 0.
pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 64-bit: dst is both written and read
// in S1 because it is the accumulator input.
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 128-bit (accumulator read as above).
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector integer binary op, 64-bit; result in S4.
pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

// Vector integer binary op, 128-bit.
pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}

// Vector logical op, 64-bit; result in S3.
pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector logical op, 128-bit.
pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shift by register, 64-bit (shift counts come in a vecX).
pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by register, 128-bit.
pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 64-bit (immediate needs no read stage).
pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 128-bit.
pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector FP binary op, 64-bit.
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP binary op, 128-bit.
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 64-bit (issue port 0 even for vecD).
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 128-bit.
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP square root, 128-bit.
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 64-bit.
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 128-bit.
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Duplicate general register into all lanes, 64-bit vector.
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate general register into all lanes, 128-bit vector.
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate float register into all lanes, 64-bit vector.
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate float register into all lanes, 128-bit vector.
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate double register into both lanes, 128-bit vector.
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate, 64-bit.
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate, 128-bit.
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector load, 64-bit; address is consumed at issue (ISS).
pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector load, 128-bit.
pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector store, 64-bit; address at issue, data read in S2.
pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
 7544 
 7545 pipe_class vstore_reg_mem128(vecD src, vmem16 mem)
 7546 %{
 7547   single_instruction;
 7548   mem    : ISS(read);
 7549   src    : S2(read);
 7550   INS01  : ISS;
 7551   NEON_FP : S3;
 7552 %}
 7553 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}

//------- Compare operation -------------------------------

// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-immediate
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

//------- Conditional instructions ------------------------

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

//------- Multiply pipeline operations --------------------

// Multiply reg-reg
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long multiply
// Eg.  MUL     w0, w1, w2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long multiply accumulate
// Eg.  MADD    w0, w1, w2, w3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

//------- Divide pipeline operations --------------------

// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
 7795 
//------- Load pipeline operations ------------------------

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

//------- Store pipeline operations -----------------------

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

//------- Branch pipeline operations ----------------------

// Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;
%}
 7956 
 7957 %}
 7958 //----------INSTRUCTIONS-------------------------------------------------------
 7959 //
 7960 // match      -- States which machine-independent subtree may be replaced
 7961 //               by this instruction.
 7962 // ins_cost   -- The estimated cost of this instruction is used by instruction
 7963 //               selection to identify a minimum cost tree of machine
 7964 //               instructions that matches a tree of machine-independent
 7965 //               instructions.
 7966 // format     -- A string providing the disassembly for this instruction.
 7967 //               The value of an instruction's operand may be inserted
 7968 //               by referring to it with a '$' prefix.
 7969 // opcode     -- Three instruction opcodes may be provided.  These are referred
 7970 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
 7972 //               indicate the type of machine instruction, while secondary
 7973 //               and tertiary are often used for prefix options or addressing
 7974 //               modes.
 7975 // ins_encode -- A list of encode classes with parameters. The encode class
 7976 //               name must have been defined in an 'enc_class' specification
 7977 //               in the encode section of the architecture description.
 7978 
 7979 // ============================================================================
 7980 // Memory (Load/Store) Instructions
 7981 
 7982 // Load Instructions
 7983 
// Load Byte (8 bit signed)
// The needs_acquiring_load predicate excludes loads that require
// acquire semantics; those are matched by other rules.
instruct loadB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
// Matches the load fused with the ConvI2L so no separate extend is needed.
instruct loadB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long
instruct loadI2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
// The predicate digs through the AndL/ConvI2L wrappers to reach the load.
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 8137 
 8138 // Load Long (64 bit signed)
 8139 instruct loadL(iRegLNoSp dst, memory mem)
 8140 %{
 8141   match(Set dst (LoadL mem));
 8142   predicate(!needs_acquiring_load(n));
 8143 
 8144   ins_cost(4 * INSN_COST);
 8145   format %{ "ldr  $dst, $mem\t# int" %}
 8146 
 8147   ins_encode(aarch64_enc_ldr(dst, mem));
 8148 
 8149   ins_pipe(iload_reg_mem);
 8150 %}
 8151 
// Load Range (array length); no acquire variant exists, hence no predicate.
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer (32-bit narrow oop)
instruct loadN(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Float
instruct loadF(vRegF dst, memory mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
 8248 
 8249 
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant
// Costed at 4 instructions: a full 64-bit pointer may need a mov
// plus up to three movk instructions.
instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
 8305 
 8306 // Load Pointer Constant One
 8307 
 8308 instruct loadConP1(iRegPNoSp dst, immP_1 con)
 8309 %{
 8310   match(Set dst con);
 8311 
 8312   ins_cost(INSN_COST);
 8313   format %{ "mov  $dst, $con\t# NULL ptr" %}
 8314 
 8315   ins_encode(aarch64_enc_mov_p1(dst, con));
 8316 
 8317   ins_pipe(ialu_imm);
 8318 %}
 8319 
// Load Poll Page Constant
// Materialized with adr (PC-relative), not a mov sequence.
instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}

  ins_encode(aarch64_enc_mov_poll_page(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Byte Map Base Constant (card table base), also via adr.
instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
 8389 
 8390 // Load Packed Float Constant
 8391 
 8392 instruct loadConF_packed(vRegF dst, immFPacked con) %{
 8393   match(Set dst con);
 8394   ins_cost(INSN_COST * 4);
 8395   format %{ "fmovs  $dst, $con"%}
 8396   ins_encode %{
 8397     __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
 8398   %}
 8399 
 8400   ins_pipe(fp_imm_s);
 8401 %}
 8402 
 8403 // Load Float Constant
 8404 
 8405 instruct loadConF(vRegF dst, immF con) %{
 8406   match(Set dst con);
 8407 
 8408   ins_cost(INSN_COST * 4);
 8409 
 8410   format %{
 8411     "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
 8412   %}
 8413 
 8414   ins_encode %{
 8415     __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
 8416   %}
 8417 
 8418   ins_pipe(fp_load_constant_s);
 8419 %}
 8420 
 8421 // Load Packed Double Constant
 8422 
 8423 instruct loadConD_packed(vRegD dst, immDPacked con) %{
 8424   match(Set dst con);
 8425   ins_cost(INSN_COST);
 8426   format %{ "fmovd  $dst, $con"%}
 8427   ins_encode %{
 8428     __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
 8429   %}
 8430 
 8431   ins_pipe(fp_imm_d);
 8432 %}
 8433 
 8434 // Load Double Constant
 8435 
 8436 instruct loadConD(vRegD dst, immD con) %{
 8437   match(Set dst con);
 8438 
 8439   ins_cost(INSN_COST * 5);
 8440   format %{
 8441     "ldrd $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
 8442   %}
 8443 
 8444   ins_encode %{
 8445     __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
 8446   %}
 8447 
 8448   ins_pipe(fp_load_constant_d);
 8449 %}
 8450 
// Store Instructions

// Store CMS card-mark Immediate
// Only matched when the preceding StoreStore barrier is provably
// unnecessary (see predicate); otherwise the _ordered variant below is used.
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  // Two instructions: dmb ishst followed by the byte store.
  ins_cost(INSN_COST * 2);
  format %{ "dmb ishst"
      "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}

// Store Byte
// Plain (non-releasing) store; releasing (volatile) stores are matched
// by storeB_volatile instead.
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
 8495 
 8496 
 8497 instruct storeimmB0(immI0 zero, memory mem)
 8498 %{
 8499   match(Set mem (StoreB mem zero));
 8500   predicate(!needs_releasing_store(n));
 8501 
 8502   ins_cost(INSN_COST);
 8503   format %{ "strb rscractch2, $mem\t# byte" %}
 8504 
 8505   ins_encode(aarch64_enc_strb0(mem));
 8506 
 8507   ins_pipe(istore_mem);
 8508 %}
 8509 
// Store Char/Short
// Plain (non-releasing) 16-bit store.
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Char/Short Immediate Zero — uses the zero register.
instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}

// Store Integer
// Plain (non-releasing) 32-bit store.

instruct storeI(iRegIorL2I src, memory mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Integer Immediate Zero — uses the zero register.
instruct storeimmI0(immI0 zero, memory mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
 8564 
 8565 // Store Long (64 bit signed)
 8566 instruct storeL(iRegL src, memory mem)
 8567 %{
 8568   match(Set mem (StoreL mem src));
 8569   predicate(!needs_releasing_store(n));
 8570 
 8571   ins_cost(INSN_COST);
 8572   format %{ "str  $src, $mem\t# int" %}
 8573 
 8574   ins_encode(aarch64_enc_str(src, mem));
 8575 
 8576   ins_pipe(istore_reg_mem);
 8577 %}
 8578 
 8579 // Store Long (64 bit signed)
 8580 instruct storeimmL0(immL0 zero, memory mem)
 8581 %{
 8582   match(Set mem (StoreL mem zero));
 8583   predicate(!needs_releasing_store(n));
 8584 
 8585   ins_cost(INSN_COST);
 8586   format %{ "str  zr, $mem\t# int" %}
 8587 
 8588   ins_encode(aarch64_enc_str0(mem));
 8589 
 8590   ins_pipe(istore_mem);
 8591 %}
 8592 
// Store Pointer
// Plain (non-releasing) 64-bit pointer store.
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Pointer
// NULL-pointer store — uses the zero register.
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Compressed Pointer
instruct storeN(iRegN src, memory mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Compressed NULL Pointer
// When both the oop and klass bases are NULL, rheapbase holds zero, so it
// can be stored directly instead of materializing a zero.
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Float
instruct storeF(vRegF src, memory mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD(vRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory mem)
%{
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// TODO
// implement storeImmD0 and storeDImmPacked

// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

instruct prefetchalloc( memory mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
 8711 
//  ---------------- volatile loads and stores ----------------
//
// These rules use load-acquire (ldar*) / store-release (stlr*) forms.
// Those instructions only take a base-register address, so the memory
// operand is restricted to `indirect` rather than the general `memory`
// operand used by the plain rules above.

// Load Byte (8 bit signed)
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  // Matches the load fused with its conversion to long.
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Char/Short (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
 8803 
 8804 // Load Short/Char (16 bit signed) into long
 8805 instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 8806 %{
 8807   match(Set dst (ConvI2L (LoadS mem)));
 8808 
 8809   ins_cost(VOLATILE_REF_COST);
 8810   format %{ "ldarh  $dst, $mem\t# short" %}
 8811 
 8812   ins_encode(aarch64_enc_ldarsh(dst, mem));
 8813 
 8814   ins_pipe(pipe_serial);
 8815 %}
 8816 
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// Matches the (ConvI2L ... & 0xFFFFFFFF) idiom; ldarw zeroes the upper
// 32 bits, so the mask needs no extra instruction.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
 8842 
 8843 // Load Long (64 bit signed)
 8844 instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 8845 %{
 8846   match(Set dst (LoadL mem));
 8847 
 8848   ins_cost(VOLATILE_REF_COST);
 8849   format %{ "ldar  $dst, $mem\t# int" %}
 8850 
 8851   ins_encode(aarch64_enc_ldar(dst, mem));
 8852 
 8853   ins_pipe(pipe_serial);
 8854 %}
 8855 
// Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
// ldar has no FP-register form, so the encoding presumably goes through an
// integer scratch register — see aarch64_enc_fldars (TODO confirm).
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Store Byte
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
 8947 
 8948 // Store Long (64 bit signed)
 8949 instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
 8950 %{
 8951   match(Set mem (StoreL mem src));
 8952 
 8953   ins_cost(VOLATILE_REF_COST);
 8954   format %{ "stlr  $src, $mem\t# int" %}
 8955 
 8956   ins_encode(aarch64_enc_stlr(src, mem));
 8957 
 8958   ins_pipe(pipe_class_memory);
 8959 %}
 8960 
// Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float
// stlr has no FP-register form, so the encoding presumably goes through an
// integer scratch register — see aarch64_enc_fstlrs (TODO confirm).
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 9015 
//  ---------------- end of volatile loads and stores ----------------

// ============================================================================
// BSWAP Instructions

instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Signed variant: after the 16-bit byte reverse, sbfmw sign-extends the
// low 16 bits into the full 32-bit result.
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
 9074 
 9075 // ============================================================================
 9076 // Zero Count Instructions
 9077 
 9078 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
 9079   match(Set dst (CountLeadingZerosI src));
 9080 
 9081   ins_cost(INSN_COST);
 9082   format %{ "clzw  $dst, $src" %}
 9083   ins_encode %{
 9084     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
 9085   %}
 9086 
 9087   ins_pipe(ialu_reg);
 9088 %}
 9089 
 9090 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
 9091   match(Set dst (CountLeadingZerosL src));
 9092 
 9093   ins_cost(INSN_COST);
 9094   format %{ "clz   $dst, $src" %}
 9095   ins_encode %{
 9096     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
 9097   %}
 9098 
 9099   ins_pipe(ialu_reg);
 9100 %}
 9101 
 9102 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
 9103   match(Set dst (CountTrailingZerosI src));
 9104 
 9105   ins_cost(INSN_COST * 2);
 9106   format %{ "rbitw  $dst, $src\n\t"
 9107             "clzw   $dst, $dst" %}
 9108   ins_encode %{
 9109     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
 9110     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
 9111   %}
 9112 
 9113   ins_pipe(ialu_reg);
 9114 %}
 9115 
 9116 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
 9117   match(Set dst (CountTrailingZerosL src));
 9118 
 9119   ins_cost(INSN_COST * 2);
 9120   format %{ "rbit   $dst, $src\n\t"
 9121             "clz    $dst, $dst" %}
 9122   ins_encode %{
 9123     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
 9124     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
 9125   %}
 9126 
 9127   ins_pipe(ialu_reg);
 9128 %}
 9129 
//---------- Population Count Instructions -------------------------------------
//
// There is no integer popcount instruction; the value is moved to a SIMD
// register, cnt counts bits per byte, and addv sums the byte counts.

instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory-operand variant: loads the int straight into the SIMD register
// (ldrs) rather than via a general register.
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory-operand variant of popCountL (ldrd directly into SIMD register).
instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
 9219 
 9220 // ============================================================================
 9221 // MemBar Instruction
 9222 
 9223 instruct load_fence() %{
 9224   match(LoadFence);
 9225   ins_cost(VOLATILE_REF_COST);
 9226 
 9227   format %{ "load_fence" %}
 9228 
 9229   ins_encode %{
 9230     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
 9231   %}
 9232   ins_pipe(pipe_serial);
 9233 %}
 9234 
 9235 instruct unnecessary_membar_acquire() %{
 9236   predicate(unnecessary_acquire(n));
 9237   match(MemBarAcquire);
 9238   ins_cost(0);
 9239 
 9240   format %{ "membar_acquire (elided)" %}
 9241 
 9242   ins_encode %{
 9243     __ block_comment("membar_acquire (elided)");
 9244   %}
 9245 
 9246   ins_pipe(pipe_class_empty);
 9247 %}
 9248 
 9249 instruct membar_acquire() %{
 9250   match(MemBarAcquire);
 9251   ins_cost(VOLATILE_REF_COST);
 9252 
 9253   format %{ "membar_acquire" %}
 9254 
 9255   ins_encode %{
 9256     __ block_comment("membar_acquire");
 9257     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
 9258   %}
 9259 
 9260   ins_pipe(pipe_serial);
 9261 %}
 9262 
 9263 
 9264 instruct membar_acquire_lock() %{
 9265   match(MemBarAcquireLock);
 9266   ins_cost(VOLATILE_REF_COST);
 9267 
 9268   format %{ "membar_acquire_lock (elided)" %}
 9269 
 9270   ins_encode %{
 9271     __ block_comment("membar_acquire_lock (elided)");
 9272   %}
 9273 
 9274   ins_pipe(pipe_serial);
 9275 %}
 9276 
 9277 instruct store_fence() %{
 9278   match(StoreFence);
 9279   ins_cost(VOLATILE_REF_COST);
 9280 
 9281   format %{ "store_fence" %}
 9282 
 9283   ins_encode %{
 9284     __ membar(Assembler::LoadStore|Assembler::StoreStore);
 9285   %}
 9286   ins_pipe(pipe_serial);
 9287 %}
 9288 
 9289 instruct unnecessary_membar_release() %{
 9290   predicate(unnecessary_release(n));
 9291   match(MemBarRelease);
 9292   ins_cost(0);
 9293 
 9294   format %{ "membar_release (elided)" %}
 9295 
 9296   ins_encode %{
 9297     __ block_comment("membar_release (elided)");
 9298   %}
 9299   ins_pipe(pipe_serial);
 9300 %}
 9301 
 9302 instruct membar_release() %{
 9303   match(MemBarRelease);
 9304   ins_cost(VOLATILE_REF_COST);
 9305 
 9306   format %{ "membar_release" %}
 9307 
 9308   ins_encode %{
 9309     __ block_comment("membar_release");
 9310     __ membar(Assembler::LoadStore|Assembler::StoreStore);
 9311   %}
 9312   ins_pipe(pipe_serial);
 9313 %}
 9314 
 9315 instruct membar_storestore() %{
 9316   match(MemBarStoreStore);
 9317   ins_cost(VOLATILE_REF_COST);
 9318 
 9319   format %{ "MEMBAR-store-store" %}
 9320 
 9321   ins_encode %{
 9322     __ membar(Assembler::StoreStore);
 9323   %}
 9324   ins_pipe(pipe_serial);
 9325 %}
 9326 
 9327 instruct membar_release_lock() %{
 9328   match(MemBarReleaseLock);
 9329   ins_cost(VOLATILE_REF_COST);
 9330 
 9331   format %{ "membar_release_lock (elided)" %}
 9332 
 9333   ins_encode %{
 9334     __ block_comment("membar_release_lock (elided)");
 9335   %}
 9336 
 9337   ins_pipe(pipe_serial);
 9338 %}
 9339 
 9340 instruct unnecessary_membar_volatile() %{
 9341   predicate(unnecessary_volatile(n));
 9342   match(MemBarVolatile);
 9343   ins_cost(0);
 9344 
 9345   format %{ "membar_volatile (elided)" %}
 9346 
 9347   ins_encode %{
 9348     __ block_comment("membar_volatile (elided)");
 9349   %}
 9350 
 9351   ins_pipe(pipe_serial);
 9352 %}
 9353 
 9354 instruct membar_volatile() %{
 9355   match(MemBarVolatile);
 9356   ins_cost(VOLATILE_REF_COST*100);
 9357 
 9358   format %{ "membar_volatile" %}
 9359 
 9360   ins_encode %{
 9361     __ block_comment("membar_volatile");
 9362     __ membar(Assembler::StoreLoad);
 9363   %}
 9364 
 9365   ins_pipe(pipe_serial);
 9366 %}
 9367 
 9368 // ============================================================================
 9369 // Cast/Convert Instructions
 9370 
 9371 instruct castX2P(iRegPNoSp dst, iRegL src) %{
 9372   match(Set dst (CastX2P src));
 9373 
 9374   ins_cost(INSN_COST);
 9375   format %{ "mov $dst, $src\t# long -> ptr" %}
 9376 
 9377   ins_encode %{
 9378     if ($dst$$reg != $src$$reg) {
 9379       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 9380     }
 9381   %}
 9382 
 9383   ins_pipe(ialu_reg);
 9384 %}
 9385 
 9386 instruct castP2X(iRegLNoSp dst, iRegP src) %{
 9387   match(Set dst (CastP2X src));
 9388 
 9389   ins_cost(INSN_COST);
 9390   format %{ "mov $dst, $src\t# ptr -> long" %}
 9391 
 9392   ins_encode %{
 9393     if ($dst$$reg != $src$$reg) {
 9394       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
 9395     }
 9396   %}
 9397 
 9398   ins_pipe(ialu_reg);
 9399 %}
 9400 
 9401 // Convert oop into int for vectors alignment masking
 9402 instruct convP2I(iRegINoSp dst, iRegP src) %{
 9403   match(Set dst (ConvL2I (CastP2X src)));
 9404 
 9405   ins_cost(INSN_COST);
 9406   format %{ "movw $dst, $src\t# ptr -> int" %}
 9407   ins_encode %{
 9408     __ movw($dst$$Register, $src$$Register);
 9409   %}
 9410 
 9411   ins_pipe(ialu_reg);
 9412 %}
 9413 
 9414 // Convert compressed oop into int for vectors alignment masking
 9415 // in case of 32bit oops (heap < 4Gb).
 9416 instruct convN2I(iRegINoSp dst, iRegN src)
 9417 %{
 9418   predicate(Universe::narrow_oop_shift() == 0);
 9419   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
 9420 
 9421   ins_cost(INSN_COST);
 9422   format %{ "mov dst, $src\t# compressed ptr -> int" %}
 9423   ins_encode %{
 9424     __ movw($dst$$Register, $src$$Register);
 9425   %}
 9426 
 9427   ins_pipe(ialu_reg);
 9428 %}
 9429 
 9430 
 9431 // Convert oop pointer into compressed form
// Compress a possibly-null oop.  The predicate excludes the NotNull
// case, which is handled by encodeHeapOop_not_null below; the general
// encode must test for null, hence the KILL of the flags register.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}
 9445 
// Compress an oop known to be non-null: no null check is needed, so
// this is cheaper than the general encodeHeapOop above.
// NOTE(review): cr is declared as an operand but there is no
// effect(KILL cr) — confirm encode_heap_oop_not_null really leaves the
// flags untouched, or drop the unused cr operand.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
 9456 
// Decompress a possibly-null narrow oop.  The predicate excludes the
// NotNull and Constant cases handled by decodeHeapOop_not_null below.
// NOTE(review): cr is declared as an operand but there is no
// effect(KILL cr) — confirm decode_heap_oop leaves the flags untouched.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}
 9470 
// Decompress a narrow oop known to be non-null (or a constant): the
// null check can be skipped, making this cheaper than decodeHeapOop.
// NOTE(review): cr is declared as an operand but there is no
// effect(KILL cr) — confirm decode_heap_oop_not_null leaves the flags
// untouched.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant)
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
 9484 
 9485 // n.b. AArch64 implementations of encode_klass_not_null and
 9486 // decode_klass_not_null do not modify the flags register so, unlike
 9487 // Intel, we don't kill CR as a side effect here
 9488 
// Compress a klass pointer (always non-null).  Per the note above, the
// AArch64 implementation does not modify flags, so no KILL cr effect.
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}
 9503 
// Decompress a narrow klass pointer (always non-null).  Does not
// modify flags (see note above), so no KILL cr effect.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    // The in-place (single-register) overload is used when src and dst
    // were allocated to the same register.
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
 9522 
// CheckCastPP is a type-system-only node: the value is unchanged, so
// this emits no code (size 0, empty encoding).
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
 9532 
// CastPP is a compiler-internal pointer retype; no code is emitted.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
 9542 
// CastII is a compiler-internal int retype; no code is emitted.
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
 9553 
 9554 // ============================================================================
 9555 // Atomic operation instructions
 9556 //
 9557 // Intel and SPARC both implement Ideal Node LoadPLocked and
 9558 // Store{PIL}Conditional instructions using a normal load for the
 9559 // LoadPLocked and a CAS for the Store{PIL}Conditional.
 9560 //
 9561 // The ideal code appears only to use LoadPLocked/StorePLocked as a
 9562 // pair to lock object allocations from Eden space when not using
 9563 // TLABs.
 9564 //
 9565 // There does not appear to be a Load{IL}Locked Ideal Node and the
 9566 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
 9567 // and to use StoreIConditional only for 32-bit and StoreLConditional
 9568 // only for 64-bit.
 9569 //
 9570 // We implement LoadPLocked and StorePLocked instructions using,
 9571 // respectively the AArch64 hw load-exclusive and store-conditional
 9572 // instructions. Whereas we must implement each of
 9573 // Store{IL}Conditional using a CAS which employs a pair of
 9574 // instructions comprising a load-exclusive followed by a
 9575 // store-conditional.
 9576 
 9577 
 9578 // Locked-load (linked load) of the current heap-top
 9579 // used when updating the eden heap top
 9580 // implemented using ldaxr on AArch64
 9581 
// Linked (exclusive) load of a pointer, paired with storePConditional
// below for lock-free heap-top updates.  Uses ldaxr (load-acquire
// exclusive) per the comment above.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
 9594 
 9595 // Conditional-store of the updated heap-top.
 9596 // Used during allocation of the shared heap.
 9597 // Sets flag (EQ) on success.
 9598 // implemented using stlxr on AArch64.
 9599 
// Conditional (exclusive) store of a pointer, completing the
// loadPLocked/storePConditional pair.  Succeeds only if no other CPU
// wrote the location since the linked load; the flags encode success
// (EQ) per the format below.
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
 9619 
 9620 
 9621 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
 9622 // when attempting to rebias a lock towards the current thread.  We
 9623 // must use the acquire form of cmpxchg in order to guarantee acquire
 9624 // semantics in this case.
// Conditional store of a long, implemented as a full CAS.  Uses the
// acquiring cmpxchg form for the reason given in the comment above
// (rebias path in expand_lock_node needs acquire semantics).
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
 9640 
 9641 // storeIConditional also has acquire semantics, for no better reason
 9642 // than matching storeLConditional.  At the time of writing this
 9643 // comment storeIConditional was not used anywhere by AArch64.
// Conditional store of an int, implemented as a 32-bit CAS.  Acquire
// form chosen only for symmetry with storeLConditional (see comment
// above).
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
 9659 
 9660 // standard CompareAndSwapX when we are using barriers
 9661 // these have higher priority than the rules selected by a predicate
 9662 
 9663 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
 9664 // can't match them
 9665 
// Strong CAS, byte-sized operand.  res <- 1 on success, 0 on failure
// (cset on EQ after the cmpxchg).  Cost 2x so the *Acq variants below
// win when their predicate holds.
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Strong CAS, halfword (short) operand.
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Strong CAS, 32-bit int operand.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Strong CAS, 64-bit long operand.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Strong CAS, pointer (64-bit) operand.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Strong CAS, narrow-oop (32-bit compressed pointer) operand.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 9773 
 9774 // alternative CompareAndSwapX when we are eliding barriers
 9775 
// Acquiring CAS variants: selected (via lower cost) when the predicate
// says the surrounding code lets us elide separate barriers and use a
// load-acquire exclusive inside the CAS instead.

// Acquiring strong CAS, 32-bit int operand.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring strong CAS, 64-bit long operand.
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring strong CAS, pointer operand.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring strong CAS, narrow-oop operand.
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 9851 
 9852 
 9853 // ---------------------------------------------------------------------
 9854 
 9855 
 9856 // BEGIN This section of the file is automatically generated. Do not edit --------------
 9857 
 9858 // Sundry CAS operations.  Note that release is always true,
 9859 // regardless of the memory ordering of the CAS.  This is because we
 9860 // need the volatile case to be sequentially consistent but there is
 9861 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
 9862 // can't check the type of memory ordering here, so we always emit a
 9863 // STLXR.
 9864 
 9865 // This section is generated from aarch64_ad_cas.m4
 9866 
 9867 
 9868 
// CompareAndExchange* return the value previously in memory (not a
// success flag).  These are STRONG exchanges: every cmpxchg below is
// called with /*weak*/ false.
// NOTE(review): the format strings say "weak", contradicting the
// /*weak*/ false argument — presumably an m4 template slip; fix in
// aarch64_ad_cas.m4 and regenerate rather than editing here.

// Byte-sized exchange.  oldval is zero-extended into rscratch2 for the
// comparison; the fetched byte is sign-extended into res on the way out.
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ uxtbw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Halfword (short) exchange; same zero-extend / sign-extend dance as B.
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ uxthw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// 32-bit int exchange.
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// 64-bit long exchange.
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Narrow-oop (32-bit compressed pointer) exchange.
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Pointer (64-bit) exchange.
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 9962 
// WeakCompareAndSwap*: a weak CAS may fail spuriously; the caller is
// expected to loop.  res <- 1 on success, 0 on failure (csetw on EQ);
// the previous memory value is discarded (cmpxchg result reg = noreg).

// Weak CAS, byte operand (oldval zero-extended for the comparison).
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ uxtbw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak CAS, halfword (short) operand.
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ uxthw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak CAS, 32-bit int operand.
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak CAS, 64-bit long operand.
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak CAS, narrow-oop operand.
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak CAS, pointer operand.
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
10066 
10067 // END This section of the file is automatically generated. Do not edit --------------
10068 // ---------------------------------------------------------------------
10069 
// Atomic exchange family: store newv to [mem], return the previous
// value in prev.  Word-sized forms use atomic_xchgw, 64-bit forms
// atomic_xchg.

// Atomic exchange, 32-bit int.
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange, 64-bit long.
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange, narrow oop (32-bit).
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange, pointer (64-bit).
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  match(Set prev (GetAndSetP mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10105 
10106 
// Atomic fetch-and-add family.  Variants: long vs int (atomic_add vs
// atomic_addw), register vs immediate increment (Li/Ii), and _no_res
// forms (slightly cheaper, cost 9 vs 10) selected when the loaded
// value is unused (result_not_used predicate) — they pass noreg so the
// old value is discarded.

// Fetch-and-add, long, register increment; newval <- old [mem] value.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Fetch-and-add, long, register increment, result unused.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Fetch-and-add, long, immediate increment (valid add/sub immediate).
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Fetch-and-add, long, immediate increment, result unused.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Fetch-and-add, int, register increment.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Fetch-and-add, int, register increment, result unused.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Fetch-and-add, int, immediate increment.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Fetch-and-add, int, immediate increment, result unused.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10190 
10191 // Manifest a CmpL result in an integer register.
10192 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Three-way long compare into an int register (see comment above).
// Sequence: cmp sets the flags; csetw makes dst = (src1 != src2);
// cnegw negates dst when src1 < src2, yielding -1/0/+1.
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  // format %{ "CmpL3 $dst, $src1, $src2" %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
10213 
// Three-way long-vs-immediate compare into an int register.  The
// immLAddSub operand guarantees the constant is a valid AArch64
// add/sub immediate, so the int32_t narrowing below cannot truncate.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    int32_t con = (int32_t)$src2$$constant;
    // A negative constant is not a legal subtract immediate, so
    // compare via adds with the negated (positive) value instead.
     if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
10238 
10239 // ============================================================================
10240 // Conditional Move Instructions
10241 
10242 // n.b. we have identical rules for both a signed compare op (cmpOp)
10243 // and an unsigned compare op (cmpOpU). it would be nice if we could
10244 // define an op class which merged both inputs and use it to type the
// argument to a single rule. Unfortunately this fails because the
10246 // opclass does not live up to the COND_INTER interface of its
10247 // component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
10249 // which throws a ShouldNotHappen. So, we have to provide two flavours
10250 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
10251 
// Conditional move, int, signed compare flags.  Note the operand order
// in cselw: src2 is selected when the condition holds, src1 otherwise
// (matching the CMoveI ideal semantics as reflected in the format).
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Conditional move, int, unsigned compare flags (duplicate of the rule
// above for cmpOpU — see the explanation in the comment block above).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10283 
10284 // special cases where one arg is zero
10285 
10286 // n.b. this is selected in preference to the rule above because it
10287 // avoids loading constant 0 into a source register
10288 
10289 // TODO
10290 // we ought only to be able to cull one of these variants as the ideal
10291 // transforms ought always to order the zero consistently (to left/right?)
10292 
// Zero on the left: select $src when the condition holds, else zr.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero on the right: select zr when the condition holds, else $src.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10356 
10357 // special case for creating a boolean 0 or 1
10358 
10359 // n.b. this is selected in preference to the rule above because it
10360 // avoids loading constants 0 and 1 into a source register
10361 
// Materialize a boolean: csincw zr, zr gives 0 when the condition holds
// and 0+1 otherwise, with no source registers needed.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
10399 
// Conditional move, long: 64-bit csel, operands reversed as in the int
// rules above.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10431 
// special cases where one arg is zero

// Zero on the right: select zr when the condition holds, else $src.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero on the left: select $src when the condition holds, else zr.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10497 
// Conditional move, pointer: 64-bit csel, operands reversed as in the
// int rules above.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10529 
// special cases where one arg is zero

// Zero on the right: select zr when the condition holds, else $src.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero on the left: select $src when the condition holds, else zr.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10595 
// Conditional move, compressed (narrow) pointer: 32-bit cselw, operands
// reversed as in the int rules above.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10611 
// Unsigned-compare flavour of cmovN_reg_reg.
// Fixed the format comment: this rule is typed against cmpOpU, so the
// debug text now says "unsigned" (it previously said "signed").
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10627 
// special cases where one arg is zero

// Zero on the right: select zr when the condition holds, else $src.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero on the left: select $src when the condition holds, else zr.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10693 
// Conditional move, float: fcsel on the FP/SIMD registers, same reversed
// operand order as the integer csel rules.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10729 
// Conditional move, double: fcseld, same reversed operand order as the
// integer csel rules.
// Fixed the format comment: this is the double (CMoveD/fcseld) rule but
// the debug text previously said "cmove float".
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10747 
// Unsigned-compare flavour of cmovD_reg.
// Fixed the format comment: this is the double (CMoveD/fcseld) rule but
// the debug text previously said "cmove float".
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10765 
10766 // ============================================================================
10767 // Arithmetic Instructions
10768 //
10769 
10770 // Integer Addition
10771 
10772 // TODO
10773 // these currently employ operations which do not set CR and hence are
10774 // not flagged as killing CR but we would like to isolate the cases
10775 // where we want to set flags from those where we don't. need to work
10776 // out how to do that.
10777 
// 32-bit add, register + register.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit add, register + add/sub-encodable immediate.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// 32-bit add of an immediate to the low word of a long (ConvL2I folded
// away — addw reads only the low 32 bits of $src1).
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10820 
// Pointer Addition
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer plus sign-extended int offset: the ConvI2L folds into the
// add's sxtw extend operand.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer plus shifted long index: folded into a single lea with an
// lsl-scaled register offset.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Pointer plus shifted, sign-extended int index: folded into a single
// lea with an sxtw-extended, scaled register offset.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10881 
// Left shift of a sign-extended int: implemented as sbfiz (signed
// bit-field insert in zero). The field width is clamped to 32 via MIN
// since the source value only carries 32 significant bits.
// NOTE(review): cr is listed as an operand but no flags are written
// here — presumably inherited boilerplate; confirm before relying on it.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
10896 
// Pointer Immediate Addition
// n.b. this needs to be more expensive than using an indirect memory
// operand
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}

// Long Addition
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10930 
// Long Immediate Addition. No constant pool entries required.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10945 
// Integer Subtraction
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10976 
// Long Subtraction
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10993 
// Long Immediate Subtraction. No constant pool entries required.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // Fixed: the format was missing the space after the mnemonic
  // ("sub$dst, ..."), garbling the printed assembly.
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
11008 
// Integer Negation (special case for sub)

// NOTE(review): cr appears as an operand in both negation rules but
// neg/negw do not set flags — presumably boilerplate; confirm.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long Negation

instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
11040 
// Integer Multiply

instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// 32x32 -> 64 widening multiply: folds (MulL (ConvI2L a) (ConvI2L b))
// into a single smull.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// Long Multiply

instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}

// High 64 bits of a signed 64x64 multiply (smulh).
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2, \t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
11105 
11106 // Combined Integer Multiply & Add/Sub
11107 
// Fused multiply-add, int: dst = src3 + src1 * src2 in one maddw.
// Format fixed to name the 32-bit mnemonic (maddw) actually emitted,
// consistent with the other int rules (addw/subw/mulw).
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
11123 
// Fused multiply-subtract, int: dst = src3 - src1 * src2 in one msubw.
// Format fixed to name the 32-bit mnemonic (msubw) actually emitted,
// consistent with the other int rules (addw/subw/mulw).
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
11139 
// Combined Long Multiply & Add/Sub

// Fused multiply-add, long: dst = src3 + src1 * src2 in one madd.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// Fused multiply-subtract, long: dst = src3 - src1 * src2 in one msub.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
11173 
// Integer Divide

instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}

// (x >> 31) >>> 31 collapses to a single logical shift right by 31,
// extracting the sign bit as 0/1 (div1/div2 are constrained to 31).
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// src + sign-bit(src), folded into one addw with an LSR #31 shifted
// operand — the rounding step of signed divide-by-power-of-2.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}

// Long Divide

instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}

// Long counterpart of signExtract: (x >> 63) >>> 63 -> lsr #63.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
11231 
// Long counterpart of div2Round: src + sign-bit(src) as one add with an
// LSR #63 shifted operand.
// Fixed the format to show the LSR shift, matching both the emitted
// shifted-register add and the int variant's format.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immI_63 div1, immI_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "add $dst, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
11245 
11246 // Integer Remainder
11247 
// Int remainder via sdivw + msubw: dst = src1 - (src1 / src2) * src2.
// Fixed the format's second line, which had a spurious unbalanced
// parenthesis ("msubw($dst, ...").
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
11258 
11259 // Long Remainder
11260 
// Long remainder via sdiv + msub: dst = src1 - (src1 / src2) * src2.
// Fixed the format: spurious unbalanced parenthesis ("msub($dst, ...")
// removed, and "\n" changed to "\n\t" to indent like modI.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
11271 
11272 // Integer Shifts
11273 
11274 // Shift Left Register
// 32-bit shift left by a register-held count (lslvw; the hardware uses
// the count modulo 32 per the A64 ISA — NOTE(review): confirm against
// Assembler::lslvw, which matches Java's ishl count masking).
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11289 
11290 // Shift Left Immediate
// 32-bit shift left by a constant; the immediate is masked to 5 bits
// (0-31) before encoding, so any immI shift count is legal.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11305 
11306 // Shift Right Logical Register
// 32-bit unsigned (logical) shift right by a register-held count (lsrvw).
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11321 
11322 // Shift Right Logical Immediate
// 32-bit unsigned shift right by a constant; count masked to 0-31.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11337 
11338 // Shift Right Arithmetic Register
// 32-bit arithmetic (sign-propagating) shift right by a register-held
// count (asrvw).
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11353 
11354 // Shift Right Arithmetic Immediate
// 32-bit arithmetic shift right by a constant; count masked to 0-31.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11369 
11370 // Combined Int Mask and Right Shift (using UBFM)
11371 // TODO
11372 
11373 // Long Shifts
11374 
11375 // Shift Left Register
// 64-bit shift left by a register-held count (lslv).
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11390 
11391 // Shift Left Immediate
// 64-bit shift left by a constant; count masked to 6 bits (0-63).
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11406 
11407 // Shift Right Logical Register
// 64-bit unsigned shift right by a register-held count (lsrv).
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11422 
11423 // Shift Right Logical Immediate
// 64-bit unsigned shift right by a constant; count masked to 0-63.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11438 
11439 // A special-case pattern for card table stores.
// Pointer cast to long then logically shifted right: CastP2X needs no
// code of its own, so the whole pattern collapses to one lsr. Used for
// card-table store address computation (see comment above).
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11454 
11455 // Shift Right Arithmetic Register
// 64-bit arithmetic shift right by a register-held count (asrv).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11470 
11471 // Shift Right Arithmetic Immediate
// 64-bit arithmetic shift right by a constant; count masked to 0-63.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11486 
11487 // BEGIN This section of the file is automatically generated. Do not edit --------------
11488 
// NOTE(review): this section is machine-generated ("Do not edit" above);
// the comments below are review annotations only and will be lost on
// regeneration.
// Bitwise NOT: xor with the all-ones constant (immL_M1 / immI_M1) is
// logical negation, encoded as eon/eonw against zr. The matched m1
// constant emits no code.
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
11521 
// NOTE(review): generated section — annotations only.
// Logical ops with a complemented operand: (XorX src2 -1) is NOT(src2),
// so these fold the negation into one instruction: bic/bicw (AND NOT),
// orn/ornw (OR NOT), eon/eonw (XOR NOT). The immX_M1 operand is matched
// but emits no code.
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Note the commuted match shape here: (XorX -1 (XorX src2 src1)).
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11623 
// NOTE(review): generated section — annotations only.
// AND with a complemented, immediate-shifted operand: folds
// NOT(src2 >> src3) into a single bic/bicw with a shifted-register
// operand (URShift->LSR, RShift->ASR, LShift->LSL). Shift counts are
// masked to 0x1f (32-bit) or 0x3f (64-bit); src4 (-1) emits no code.
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11731 
// NOTE(review): generated section — annotations only.
// XOR-NOT with a shifted operand: matches the commuted shape
// (XorX -1 (XorX (shift src2 src3) src1)) and folds everything into a
// single eon/eonw with a shifted-register operand.
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11839 
// NOTE(review): generated section — annotations only.
// OR with a complemented, immediate-shifted operand: folds
// NOT(src2 shift src3) into a single orn/ornw shifted-register form.
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11947 
// NOTE(review): generated section — annotations only.
// AND where the second operand is a register shifted by an immediate:
// uses the shifted-register forms andw (32-bit) / andr (64-bit; "andr"
// is the assembler's name since "and" clashes with the C++ keyword —
// NOTE(review): confirm against Assembler declaration).
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12061 
// NOTE(review): generated section — annotations only.
// XOR where the second operand is a register shifted by an immediate:
// uses the shifted-register forms eorw (32-bit) / eor (64-bit).
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12175 
// NOTE(review): generated section — annotations only.
// OR where the second operand is a register shifted by an immediate:
// uses the shifted-register forms orrw (32-bit) / orr (64-bit).
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12270 
12271 instruct OrL_reg_LShift_reg(iRegLNoSp dst,
12272                          iRegL src1, iRegL src2,
12273                          immI src3, rFlagsReg cr) %{
12274   match(Set dst (OrL src1 (LShiftL src2 src3)));
12275 
12276   ins_cost(1.9 * INSN_COST);
12277   format %{ "orr  $dst, $src1, $src2, LSL $src3" %}
12278 
12279   ins_encode %{
12280     __ orr(as_Register($dst$$reg),
12281               as_Register($src1$$reg),
12282               as_Register($src2$$reg),
12283               Assembler::LSL,
12284               $src3$$constant & 0x3f);
12285   %}
12286 
12287   ins_pipe(ialu_reg_reg_shift);
12288 %}
12289 
// Add with the second operand shifted by an immediate, using add/addw's
// shifted-register operand form.  Shift counts are masked to 5 bits
// (32-bit) / 6 bits (64-bit).  The rFlagsReg cr operand is not used by
// the encodings.

// int: dst = src1 + (src2 >>> src3)
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: dst = src1 + (src2 >>> src3)
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: dst = src1 + (src2 >> src3)
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: dst = src1 + (src2 >> src3)
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: dst = src1 + (src2 << src3)
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: dst = src1 + (src2 << src3)
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12403 
// Subtract with the second operand shifted by an immediate, using
// sub/subw's shifted-register operand form.  Shift counts are masked to
// 5 bits (32-bit) / 6 bits (64-bit).  The rFlagsReg cr operand is not
// used by the encodings.

// int: dst = src1 - (src2 >>> src3)
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: dst = src1 - (src2 >>> src3)
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: dst = src1 - (src2 >> src3)
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: dst = src1 - (src2 >> src3)
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: dst = src1 - (src2 << src3)
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: dst = src1 - (src2 << src3)
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12517 
12518 
12519 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// The (x << lshift) >> rshift pair collapses into a single signed bitfield
// move.  The immr/imms operands are derived as r = (rshift - lshift) & 63
// and s = 63 - lshift; see the Arm ARM's SBFM description for why this
// selects the same field the two shifts would.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of sbfmL: immr/imms computed modulo 32.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Unsigned counterpart of sbfmL: (x << lshift) >>> rshift becomes ubfm
// with the same r/s derivation.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of ubfmL.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
// Bitfield extract with shift & mask
//
// (src >>> rshift) & mask, with mask of the form 2^width - 1, is an
// unsigned bitfield extract of 'width' bits starting at bit 'rshift'.
// The immI_bitmask / immL_bitmask operand types are relied on to
// guarantee mask+1 is a power of two, so exact_log2(mask+1) is
// well-defined.

// 32-bit bitfield extract: dst = (src >>> rshift) & mask
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit bitfield extract: dst = (src >>> rshift) & mask
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The 64-bit ubfx zero-extends into the upper half, so the ConvI2L is
// absorbed for free.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12661 
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// The predicate ensures the shifted field (lshift + width) still fits in
// the 32-bit register.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
    (exact_log2(n->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= (31+1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// 64-bit variant: field must fit in 64 bits (lshift + width <= 64).
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 63 &&
    (exact_log2_long(n->in(1)->in(2)->get_long()+1) + (unsigned int)n->in(2)->get_int()) <= (63+1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// If there is a ConvI2L node between an AndI and a LShiftL, we can also match ubfiz,
// since ubfiz zero-extends the masked field into the 64-bit result.
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L(AndI src mask)) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
    (exact_log2((unsigned int)n->in(1)->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= 32);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12719 
// Rotations
//
// (src1 << lshift) | (src2 >>> rshift) is an extract-from-register-pair
// (extr) whenever lshift + rshift is a multiple of the register width,
// which the predicates check via the & 63 / & 31 masks.  When
// src1 == src2 this is a rotate right by rshift.  The rFlagsReg cr
// operand is not used by the encodings.

// long: extr of (src1:src2) at bit rshift
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// int variant.
// NOTE(review): the format string prints "extr" but the encoding emits
// the 32-bit extrw.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// Same pattern with Add instead of Or: the shifted fields are disjoint
// when lshift + rshift == 64, so Add and Or produce the same bits.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// int Add variant (emits extrw; see note on extrOrI).
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12781 
12782 
// rol expander
// Expander only (no match rule).  AArch64 has no rotate-left-by-register,
// so rotate-left is synthesized as rotate-right by the negated count:
// subw computes (0 - shift) into rscratch1 (which this clobbers), then
// rorv rotates right by it.  Hence the higher cost than a plain ror.

instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// rol expander
// 32-bit variant of rolL_rReg; also clobbers rscratch1.

instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// Matches the rotate-left idiom (x << s) | (x >>> (64 - s)).
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom written as (x << s) | (x >>> (0 - s)): (0 - s) is congruent
// to (64 - s) mod 64, so the same expansion applies.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-left idiom: (x << s) | (x >>> (32 - s)).
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-left idiom with the (0 - s) spelling.
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

// ror expander
// Expander only (no match rule): rotate-right maps directly onto rorv.

instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander
// 32-bit variant of rorL_rReg.

instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// Rotate-right idiom: (x >>> s) | (x << (64 - s)).
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Rotate-right idiom with the (0 - s) spelling.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-right idiom: (x >>> s) | (x << (32 - s)).
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-right idiom with the (0 - s) spelling.
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12916 
// Add/subtract (extended)
//
// These fold a sign/zero extension of the second operand into the
// add/sub instruction's extended-register operand form.  Two extension
// idioms are recognized: a matched (LShift; RShift) pair with equal
// immediate counts (e.g. <<16 then >>16 is a signed halfword extend),
// and an And with an all-ones mask (zero extend).  The rFlagsReg cr
// operand is not used by the encodings.

// long + sign-extended int: the ConvI2L becomes add's sxtw extension.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

// long - sign-extended int.
// NOTE(review): the trailing ';' after %} here (and on AddExtI above) is
// redundant but present in the original.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};


// int + sign-extended short: (src2 << 16) >> 16 folded as sxth.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int + sign-extended byte: (src2 << 24) >> 24 folded as sxtb.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int + zero-extended byte: (src2 << 24) >>> 24 folded as uxtb.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long + sign-extended short: (src2 << 48) >> 48 folded as sxth.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long + sign-extended word: (src2 << 32) >> 32 folded as sxtw.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long + sign-extended byte: (src2 << 56) >> 56 folded as sxtb.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long + zero-extended byte: (src2 << 56) >>> 56 folded as uxtb.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}


// int + (src2 & 0xff): zero-extend-byte via And mask, folded as uxtb.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int + (src2 & 0xffff): zero-extend-halfword, folded as uxth.
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long + (src2 & 0xff).
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long + (src2 & 0xffff).
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long + (src2 & 0xffffffff): zero-extend-word, folded as uxtw.
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int - (src2 & 0xff).
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int - (src2 & 0xffff).
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long - (src2 & 0xff).
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long - (src2 & 0xffff).
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long - (src2 & 0xffffffff).
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13167 
13168 
// Add/sub with a sign-extended, shifted second operand.  The idiom
// (src2 << lshift1) >> rshift1, with lshift1 == rshift1 == 56/48/32
// (long) or 24/16 (int), sign-extends a byte/half/word; the outer
// lshift2 is folded into the extended-register form's shift amount
// (immIExt limits it to the encodable range).  These rules are part of
// the automatically generated section of this file (see the END marker
// below) -- regenerate rather than hand-editing the code.
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13298 
13299 
// Long add/sub of a shifted int: (ConvI2L src2) << lshift is folded
// into a single add/sub with a sxtw extended-register operand plus
// shift.  NOTE(review): the trailing ';' after '%}' below is redundant
// (no other instruct in this section has one) -- presumably tolerated
// by adlc; bytes kept as-is since this is inside the auto-generated
// section (see the END marker below).
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};

instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};
13325 
13326 
// Add/sub of a masked-then-shifted operand: (src2 & mask) << lshift
// with mask 0xFF / 0xFFFF / 0xFFFFFFFF becomes add/sub with a
// uxtb / uxth / uxtw extended-register operand plus shift.  These
// rules are part of the automatically generated section of this file
// (see the END marker below) -- regenerate rather than hand-editing
// the code.
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13456 // END This section of the file is automatically generated. Do not edit --------------
13457 
13458 // ============================================================================
13459 // Floating Point Arithmetic Instructions
13460 
// Scalar FP add/sub/mul: each ideal node maps directly onto the
// corresponding AArch64 scalar FP instruction (fadds/faddd, fsubs/
// fsubd, fmuls/fmuld).  Multiplies carry a slightly higher cost
// (INSN_COST * 6 vs * 5) than add/sub.
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13550 
// Fused multiply-add family (all guarded by UseFMA).  FmaF/FmaD with
// negated inputs select the fmadd/fmsub/fnmadd/fnmsub variants; the
// existing per-rule comments give the computed expression in terms of
// src1/src2/src3.

// src1 * src2 + src3
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
// Two match rules: the negation may appear on either multiplicand.
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// NOTE(review): the 'zero' operand below is not referenced by the
// match rule or the encoding -- presumably retained for matcher
// ordering; confirm before removing.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// NOTE(review): 'zero' is unused here as well -- see mnsubF above.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
  // n.b. insn name should be fnmsubd
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13691 
13692 
// Scalar FP division.  Costs reflect the long latency of divide
// (18x/32x INSN_COST for single/double); scheduled on the dedicated
// divide pipes.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
13722 
// Single-precision negate: dst = -src.
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Format fixed to name the actual emitted instruction (fnegs);
  // previously read "fneg", inconsistent with negD's "fnegd".
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
13736 
// Double-precision negate: dst = -src.
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}

// Single-precision absolute value.
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double-precision absolute value.
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13776 
// Double-precision square root.
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Schedule on the double-precision divide/sqrt pipe; this was
  // fp_div_s, apparently swapped with sqrtF_reg (which had fp_div_d).
  // Pipe class only affects scheduling cost, not correctness.
  ins_pipe(fp_div_d);
%}
13789 
// Single-precision square root, matched from the (float)Math.sqrt
// idiom: ConvD2F(SqrtD(ConvF2D(src))) collapses to one fsqrts.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Schedule on the single-precision divide/sqrt pipe; this was
  // fp_div_d, apparently swapped with sqrtD_reg (which had fp_div_s).
  // Pipe class only affects scheduling cost, not correctness.
  ins_pipe(fp_div_s);
%}
13802 
13803 // ============================================================================
13804 // Logical Instructions
13805 
13806 // Integer Logical Instructions
13807 
13808 // And Instructions
13809 
13810 
// 32-bit register-register AND.
// NOTE(review): a rFlagsReg cr operand is declared but no effect() is
// visible in this rule -- presumably relevant to matcher ordering with
// flag-setting variants elsewhere in the file; confirm before changing.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13825 
// 32-bit AND with a logical immediate (immILog = encodable bitmask).
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Format fixed from "andsw": the encoding emits a plain andw, which
  // does not set flags (ands would).
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13840 
13841 // Or Instructions
13842 
// 32-bit OR/XOR, register-register and register-immediate forms.
// Immediate forms take an immILog (encodable logical-immediate bitmask).
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13904 
13905 // Long Logical Instructions
13906 // TODO
13907 
// 64-bit AND/OR/XOR, register-register and register-immediate forms.
// Immediate forms take an immLLog (encodable logical-immediate bitmask).
// Format-string comments fixed from "# int" to "# long" (copy-paste
// leftovers from the 32-bit rules above); xorL_reg_imm's format/ins_cost
// ordering normalized to match its siblings.  Encodings are unchanged.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Or Instructions

instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14001 
// Signed int-to-long: sbfm with imms 0..31 is the sxtw alias
// (sign-extend the low 32 bits).
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// Unsigned int-to-long: ConvI2L followed by an AndL with the low-32-bit
// mask collapses to ubfm 0,31 (zero-extend).
// this pattern occurs in bigmath arithmetic
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14027 
// Long-to-int: a 32-bit register move (movw) discards the high word.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Int-to-boolean (Conv2B): compare against zero and set dst to 0/1
// with cset.  The compare clobbers the condition flags, hence KILL cr.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// Pointer-to-boolean: same cmp/cset idiom, 64-bit compare.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
14076 
// Double -> float narrowing conversion.
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// Float -> double widening conversion.
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// Float -> int: fcvtzsw converts with round-toward-zero into a 32-bit
// general register.
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// Float -> long: fcvtzs converts with round-toward-zero into a 64-bit
// general register.
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}
14128 
// Int -> float: signed 32-bit convert-from-integer.
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// Long -> float: signed 64-bit convert-from-integer.
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// Double -> int: round-toward-zero into a 32-bit general register.
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// Double -> long: round-toward-zero into a 64-bit general register.
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// Int -> double: signed 32-bit convert-from-integer.
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// Long -> double: signed 64-bit convert-from-integer.
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
14206 
14207 // stack <-> reg and reg <-> reg shuffles with no conversion
14208 
// Raw-bits moves from stack slots to registers (no value conversion).
// Each loads from a stack slot at sp + displacement into a register of
// the destination kind.

// Float stack slot -> int register: 32-bit integer load of the raw bits.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Int stack slot -> float register: 32-bit FP load of the raw bits.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Double stack slot -> long register: 64-bit integer load of the raw bits.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Long stack slot -> double register: 64-bit FP load of the raw bits.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14280 
// Raw-bits moves from registers to stack slots (no value conversion).

// Float register -> int stack slot: 32-bit FP store of the raw bits.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Int register -> float stack slot: 32-bit integer store of the raw bits.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
14316 
// Double register -> long stack slot: 64-bit FP store of the raw bits.
// Format operand order fixed to "$src, $dst" so the debug output matches
// the emitted store and the sibling reg->stack patterns
// (MoveF2I_reg_stack, MoveI2F_reg_stack, MoveL2D_reg_stack).
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14334 
// Long register -> double stack slot: 64-bit integer store of the raw bits.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
14352 
// Raw-bits register-to-register moves between the FP and integer files,
// using fmov (no conversion of the value).

// Float register -> int register.
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// Int register -> float register.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// Double register -> long register.
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// Long register -> double register.
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
14424 
14425 // ============================================================================
14426 // clearing of an array
14427 
// Zero an array: count in r11 (words), base address in r10; both are
// clobbered.  Delegates to the MacroAssembler zero_words stub.
// NOTE(review): cr is a declared operand but is not listed in effect();
// confirm whether zero_words clobbers flags.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, $cnt$$Register);
  %}

  ins_pipe(pipe_class_memory);
%}

// Constant-count variant: only used when the word count is known and
// below BlockZeroingLowLimit (scaled to words), i.e. small enough that
// the constant-length zero_words expansion is profitable.
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  predicate((u_int64_t)n->in(2)->get_long()
            < (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
  %}

  ins_pipe(pipe_class_memory);
%}
14459 
14460 // ============================================================================
14461 // Overflow Math Instructions
14462 
// Signed-add overflow checks: cmn/cmnw perform an add with the result
// discarded, leaving the flags (including V, signed overflow) for the
// consuming overflow/no_overflow cmpOp.

// Int add overflow check, register-register form.
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Int add overflow check with an add/sub-encodable immediate.
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Long add overflow check, register-register form.
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Long add overflow check with an add/sub-encodable immediate.
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
14514 
// Signed-subtract overflow checks: cmp/cmpw subtract with the result
// discarded; the V flag feeds the overflow/no_overflow cmpOp.

// Int subtract overflow check, register-register form.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Int subtract overflow check with an add/sub-encodable immediate.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Long subtract overflow check, register-register form.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Long subtract overflow check with an add/sub-encodable immediate.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
14566 
// Negation overflow checks: matched when the OverflowSub has a zero
// first operand, so the compare is against zr.

// Int negate overflow check (0 - op1).
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Long negate overflow check (0 - op1).
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
14592 
// Int multiply overflow check producing a flags result.  The 64-bit
// smull product overflows 32 bits iff it differs from the sign-extension
// of its own low word; the trailing movw/cselw/cmpw sequence then forges
// V set (overflow) or clear so a generic cmpOp can consume the flags.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form when the overflow check directly feeds a branch: skip the
// flag-forging tail and branch on the NE/EQ outcome of the sign-extension
// compare.  The predicate restricts this to overflow/no_overflow tests.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
14635 
// Long multiply overflow check: the 128-bit product (mul low half, smulh
// high half) overflows 64 bits iff the high half differs from the sign
// bits of the low half; the same movw/cselw/cmpw tail as overflowMulI_reg
// forges the V flag for a generic cmpOp consumer.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form when the long-multiply overflow check directly feeds a
// branch; branches on NE/EQ instead of forging the V flag.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
14682 
14683 // ============================================================================
14684 // Compare Instructions
14685 
// Signed int compares.  All variants delegate to shared encoder blocks
// (aarch64_enc_cmpw*) defined elsewhere in this file.

// Register-register form.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compare against the constant zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Immediate encodable in the add/sub immediate field.
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Arbitrary immediate; double cost — the encoder (defined elsewhere)
// presumably needs to materialize the constant first.
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14741 
14742 // Unsigned compare Instructions; really, same as signed compare
14743 // except it should only be used to feed an If or a CMovI which takes a
14744 // cmpOpU.
14745 
// Unsigned int compares — same cmpw encodings as the signed forms, but
// the result is an rFlagsRegU so only unsigned cmpOpU conditions consume it.

// Register-register form.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compare against the constant zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Immediate encodable in the add/sub immediate field.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Arbitrary immediate; double cost for constant materialization.
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14801 
// Signed long compares (64-bit cmp encoders).

// Register-register form.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compare against the constant zero.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Immediate encodable in the add/sub immediate field.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Arbitrary immediate; double cost for constant materialization.
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14857 
// Unsigned long compares — same encoders as the signed CmpL forms, but
// producing an rFlagsRegU consumed only by unsigned conditions.

// Register-register form.
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compare against the constant zero.
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Immediate encodable in the add/sub immediate field.
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Arbitrary immediate; double cost for constant materialization.
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14913 
// Pointer and compressed-pointer compares; pointer comparisons are
// unsigned, hence the rFlagsRegU result.

// Full-width pointer compare.
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed (narrow) pointer compare.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null test (compare against the immP0 null constant).
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed-pointer null test.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
14969 
14970 // FP comparisons
14971 //
14972 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
14973 // using normal cmpOp. See declaration of rFlagsReg for details.
14974 
// Float compare setting the normal flags register (see the FP comparisons
// note above: consumed via ordinary cmpOp conditions).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Float compare against the constant 0.0, using the fcmp-with-zero form.
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0D);
  %}

  ins_pipe(pipe_class_compare);
%}
15002 // FROM HERE
15003 
// Double compare setting the normal flags register.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Double compare against the constant 0.0, using the fcmp-with-zero form.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0D);
  %}

  ins_pipe(pipe_class_compare);
%}
15031 
// Three-way float compare: dst := -1 if src1 < src2 or unordered,
// 0 if equal, +1 otherwise (per the inline csinvw/csnegw comments).
// NOTE(review): the 'done' label is bound but never branched to;
// it appears vestigial.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}

// Three-way double compare; same -1/0/+1 scheme as compF3_reg_reg.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}
15086 
// Three-way float compare against the constant 0.0; same -1/0/+1 scheme
// as compF3_reg_reg but uses the fcmp-with-zero form.
// NOTE(review): 'done' label bound but never branched to, as above.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0D);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}

// Three-way double compare against the constant 0.0.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0D);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}
15139 
// CmpLTMask: dst := (p < q) ? -1 : 0.  csetw produces 0/1 for LT, then
// negating yields 0/-1 (all-ones mask).  Clobbers the flags.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Special case against zero: an arithmetic shift right by 31 smears the
// sign bit, directly producing -1 for negative src and 0 otherwise.
// NOTE(review): cr is declared and KILLed although asrw does not set
// flags — presumably kept for match-rule symmetry; confirm.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
15176 
15177 // ============================================================================
15178 // Max and Min
15179 
// MinI: dst = min(src1, src2), signed 32-bit, via cmp + conditional select.
// Fixes the format text, which was missing the operand commas, the `#`
// disassembly-comment marker, and left a stray trailing tab — now matching
// the style used by the other format strings in this file.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1, $src2\t# signed int\n\t"
    "cselw $dst, $src1, $src2, lt"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    // dst = (src1 < src2) ? src1 : src2
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
15204 // FROM HERE
15205 
// MaxI: dst = max(src1, src2), signed 32-bit, via cmp + conditional select.
// Fixes the format text, which was missing the operand commas, the `#`
// disassembly-comment marker, and left a stray trailing tab — now matching
// the style used by the other format strings in this file.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1, $src2\t# signed int\n\t"
    "cselw $dst, $src1, $src2, gt"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    // dst = (src1 > src2) ? src1 : src2
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
15230 
15231 // ============================================================================
15232 // Branch Instructions
15233 
15234 // Direct Branch.
// Unconditional direct branch (Goto) to a label.
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
15248 
15249 // Conditional Near Branch
// Conditional near branch on signed condition codes (b.cond).
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
15269 
15270 // Conditional Near Branch Unsigned
// Conditional near branch on unsigned condition codes (b.cond).
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
15290 
15291 // Make use of CBZ and CBNZ.  These instructions, as well as being
15292 // shorter than (cmp; branch), have the additional benefit of not
15293 // killing the flags.
15294 
// Branch on int ==/!= 0 using cbzw/cbnzw: fuses compare and branch
// and, unlike cmp+b.cond, does not modify the flags.
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15311 
// Branch on long ==/!= 0 using 64-bit cbz/cbnz; flags untouched.
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15328 
// Branch on pointer ==/!= NULL using 64-bit cbz/cbnz; flags untouched.
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15345 
// Branch on narrow (compressed) oop ==/!= 0; 32-bit cbzw/cbnzw since a
// narrow oop occupies the low 32 bits.
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15362 
// Null check of (DecodeN oop): the decoded pointer is zero iff the
// narrow oop is zero, so test the narrow form directly and skip the decode.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15379 
// Unsigned int compare with 0 folded into cbzw/cbnzw. For unsigned x,
// (x <= 0) holds iff x == 0, so EQ and LS both map to cbzw and the
// remaining conditions (NE and the complement) map to cbnzw.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15396 
// Unsigned long compare with 0 folded into cbz/cbnz; same EQ/LS mapping
// as the int variant above, using the 64-bit forms.
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15413 
15414 // Test bit and Branch
15415 
15416 // Patterns for short (< 32KiB) variants
// Signed long </>= 0 as a test of the sign bit (bit 63):
// LT -> tbnz (bit set, negative), GE -> tbz (bit clear, non-negative).
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
15432 
// Signed int </>= 0 as a test of the sign bit (bit 31):
// LT -> tbnz, GE -> tbz.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
15448 
// (op1 & single-bit-mask) ==/!= 0 as a one-instruction tbz/tbnz on
// bit log2(mask). The predicate walks the If->CmpL->AndL->immL input
// chain to require a power-of-two mask.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
15465 
// Int version of the single-bit test-and-branch above; the predicate
// requires the AndI mask to be a power of two.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
15482 
15483 // And far variants
// Far variant of cmpL_branch_sign (/*far*/true) for targets that may be
// outside the short test-and-branch range; no ins_short_branch(1) here.
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15498 
// Far variant of cmpI_branch_sign (/*far*/true), sign bit 31.
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15513 
// Far variant of cmpL_branch_bit (/*far*/true); power-of-two mask only.
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15529 
// Far variant of cmpI_branch_bit (/*far*/true); power-of-two mask only.
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15545 
15546 // Test bits
15547 
// Set flags from (op1 & imm) vs 0 with a single 64-bit tst; the
// predicate requires the mask to be encodable as a logical immediate.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
15560 
// 32-bit version: tstw with a valid 32-bit logical immediate mask.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
15573 
// Register-register form: flags from (op1 & op2) via 64-bit tst.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
15584 
// Register-register form: flags from (op1 & op2) via 32-bit tstw.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
15595 
15596 
15597 // Conditional Far Branch
15598 // Conditional Far Branch Unsigned
15599 // TODO: fixme
15600 
15601 // counted loop end branch near
// Conditional back-branch terminating a counted loop (signed condition);
// same encoding as branchCon but matches CountedLoopEnd.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
15617 
15618 // counted loop end branch near Unsigned
// Unsigned-condition variant of the counted-loop-end branch above.
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
15634 
15635 // counted loop end branch far
15636 // counted loop end branch far unsigned
15637 // TODO: fixme
15638 
15639 // ============================================================================
15640 // inlined locking and unlocking
15641 
// Inline fast-path monitor enter: sets flags to report success/failure
// to the slow path. tmp/tmp2 are scratch (TEMP), clobbered by the
// aarch64_enc_fast_lock encoding.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
15656 
// Inline fast-path monitor exit; counterpart of cmpFastLock above,
// with tmp/tmp2 as scratch registers.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
15669 
15670 
15671 // ============================================================================
15672 // Safepoint Instructions
15673 
15674 // TODO
15675 // provide a near and far version of this code
15676 
// Safepoint poll: load from the polling page (result discarded into zr);
// the load faults when the page is protected, trapping into the VM. The
// poll_type relocation lets the VM recognize the faulting instruction.
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
15689 
15690 
15691 // ============================================================================
15692 // Procedure Call/Return Instructions
15693 
15694 // Call Java Static Instruction
15695 
// Direct call to a statically-bound Java method, followed by the
// standard call epilogue encoding.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15711 
15712 // TO HERE
15713 
15714 // Call Java Dynamic Instruction
// Dynamically-dispatched Java call (inline-cache based), followed by
// the standard call epilogue encoding.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15730 
15731 // Call Runtime Instruction
15732 
// Call from compiled Java code into the VM runtime.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15747 
15748 // Call Runtime Instruction
15749 
// Leaf runtime call (no Java-visible side effects such as safepoints);
// uses the same java_to_runtime encoding as CallRuntimeDirect.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15764 
15765 // Call Runtime Instruction
15766 
// Leaf runtime call that does not use floating-point arguments/results;
// shares the java_to_runtime encoding with the variants above.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15781 
15782 // Tail Call; Jump from runtime stub to Java code.
15783 // Also known as an 'interprocedural jump'.
15784 // Target of jump will eventually return to caller.
15785 // TailJump below removes the return address.
// Indirect tail call (register branch) from a runtime stub into Java
// code; method_oop carries the method for the callee.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
15798 
// Indirect tail jump used for exception dispatch; the exception oop is
// pinned to r0 (iRegP_R0), and the return address is removed (see the
// TailJump comment above TailCalljmpInd).
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
15811 
15812 // Create exception oop: created by stack-crawling runtime code.
15813 // Created exception is now available to this handler, and is setup
15814 // just prior to jumping to this handler. No code emitted.
15815 // TODO check
15816 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// CreateEx: the runtime has already placed the exception oop in r0
// before entering the handler, so this node emits no code (size 0); it
// only tells the register allocator where the oop lives.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
15829 
15830 // Rethrow exception: The exception oop will come in the first
15831 // argument position. Then JUMP (not call) to the rethrow stub code.
// Rethrow: jump (not call) to the rethrow stub; the exception oop is
// already in the first argument register.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
15842 
15843 
15844 // Return Instruction
15845 // epilog node loads ret address into lr as part of frame pop
// Method return; the epilogue has already restored lr (see comment above).
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
15856 
15857 // Die now.
// Halt: emit a trapping instruction for paths that must never execute.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // +1 so NativeInstruction::is_sigill_zombie_not_entrant() doesn't
    // return true
    __ dpcs1(0xdead + 1);
  %}

  ins_pipe(pipe_class_default);
%}
15872 
15873 // ============================================================================
15874 // Partial Subtype Check
15875 //
15876 // superklass array for an instance of the superklass.  Set a hidden
15877 // internal cache on a hit (cache is checked with exposed code in
15878 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
15879 // encoding ALSO sets flags.
15880 
// Partial subtype check producing a result register (see section comment
// above): NZ result for a miss, zero for a hit. Register bindings
// (R4/R0/R2/R5) are fixed by the encoding. Also sets flags.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
15895 
// Variant matched when only the flags of (PartialSubtypeCheck == 0) are
// consumed: result is a scratch register (KILL) and the encoding skips
// zeroing it on a hit (opcode 0x0).
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
15910 
// StrComp intrinsic, both strings UTF-16 (UU encoding). All inputs are
// consumed destructively (USE_KILL); no vector temps needed since both
// sides have the same element width.
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register,
                      fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15928 
// StrComp intrinsic, both strings Latin-1 (LL encoding); same register
// discipline as the UU variant above.
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register,
                      fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15945 
// StrComp intrinsic, mixed UTF-16 vs Latin-1 (UL encoding); needs two
// vector temporaries (TEMP vtmp1/vtmp2) for the width conversion.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, vRegD vtmp1, vRegD vtmp2, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP vtmp1, TEMP vtmp2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15962 
// StrComp intrinsic, mixed Latin-1 vs UTF-16 (LU encoding); mirror of
// the UL variant, also using two vector temporaries.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, vRegD vtmp1, vRegD vtmp2, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP vtmp1, TEMP vtmp2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
15979 
// StrIndexOf intrinsic, UTF-16 haystack/needle (UU), variable needle
// length. The -1 constant argument tells string_indexof the needle
// length is in cnt2 rather than a compile-time constant.
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15998 
// StrIndexOf intrinsic, Latin-1 haystack/needle (LL), variable needle
// length (-1 sentinel, length taken from cnt2).
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16017 
// StrIndexOf intrinsic, UTF-16 haystack / Latin-1 needle (UL), variable
// needle length (-1 sentinel, length taken from cnt2).
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16036 
// StrIndexOf intrinsic, Latin-1 haystack / UTF-16 needle (LU), variable
// needle length (-1 sentinel, length taken from cnt2).
instruct string_indexofLU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LU)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
16055 
// StrIndexOf, UU, with a small constant needle length (immI_le_4): the
// constant is passed to string_indexof and zr replaces the runtime
// cnt2 register, so cnt2 is neither an input nor killed here.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16076 
// StrIndexOf, LL, with a small constant needle length (immI_le_4);
// same constant-length scheme as the UU variant above.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16097 
// String.indexOf intrinsic, UTF-16 source / Latin1 substring (UL), for a
// constant substring length of exactly 1 (immI_1) — note the narrower
// immediate than the UU/LL variants, which accept lengths up to 4.
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}

  ins_encode %{
    // Known-at-compile-time substring length (always 1 here).
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16118 
// String.indexOf intrinsic, Latin1 source / UTF-16 substring (LU), for a
// constant substring length of exactly 1 (immI_1), mirroring the UL rule.
instruct string_indexof_conLU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LU)" %}

  ins_encode %{
    // Known-at-compile-time substring length (always 1 here).
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
16139 
// StringUTF16.indexOf(char) intrinsic: search a UTF-16 char[] of length
// $cnt1 for the single code unit in $ch; result is the index or the
// not-found value produced by MacroAssembler::string_indexof_char.
instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16157 
// String.equals intrinsic for two Latin1 (LL) strings, delegated to the
// shared arrays_equals stub with element size 1.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    // Latin1 chars are one byte each, so $cnt needs no scaling here
    // (contrast string_equalsU, which halves the byte count first).
    __ arrays_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register,
                     1, /*is_string*/true);
  %}
  ins_pipe(pipe_class_memory);
%}
16174 
// String.equals intrinsic for two UTF-16 (UU) strings, delegated to the
// shared arrays_equals stub with element size 2.
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    // Halve the byte count to get a 16-bit-char count before comparing.
    __ asrw($cnt$$Register, $cnt$$Register, 1);
    __ arrays_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register,
                     2, /*is_string*/true);
  %}
  ins_pipe(pipe_class_memory);
%}
16192 
// Arrays.equals intrinsic for byte[] (LL encoding): element size 1,
// is_string == false so the stub also compares the length headers.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $result$$Register, $tmp$$Register,
                     1, /*is_string*/false);
  %}
  ins_pipe(pipe_class_memory);
%}
16208 
// Arrays.equals intrinsic for char[] (UU encoding): element size 2,
// is_string == false — same shared stub as array_equalsB.
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $result$$Register, $tmp$$Register,
                     2, /*is_string*/false);
  %}
  ins_pipe(pipe_class_memory);
%}
16224 
// StringCoding.hasNegatives intrinsic: scan $len bytes at $ary1 for any
// byte with the sign bit set (i.e. a non-ASCII value).
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (HasNegatives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "has negatives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
16235 
// fast char[] to byte[] compression
// StrCompressedCopy intrinsic: narrow $len UTF-16 chars at $src into
// Latin1 bytes at $dst, using four SIMD temporaries.  The stub writes
// its success/length indication into $result.
// NOTE(review): the format text says "KILL R1, R2, R3, R4" but the fixed
// inputs here are R1/R2/R3 (plus R0 for result) — the R4 mention looks
// stale; confirm against the stub's clobber set.
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 tmp1, vRegD_V1 tmp2,
                         vRegD_V2 tmp3, vRegD_V3 tmp4,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Compress $src,$dst -> $result    // KILL R1, R2, R3, R4" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                           $tmp3$$FloatRegister, $tmp4$$FloatRegister,
                           $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
16254 
// fast byte[] to char[] inflation
// StrInflatedCopy intrinsic: widen $len Latin1 bytes at $src into UTF-16
// chars at $dst.  No value is produced (Universe dummy result).
// NOTE(review): the format text lists only $tmp1 and $tmp2 as killed,
// but $tmp3 and $tmp4 are TEMPs too — comment looks incomplete.
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
                        vRegD tmp1, vRegD tmp2, vRegD tmp3, iRegP_R3 tmp4, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                          $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16269 
// encode char[] to byte[] in ISO_8859_1
// EncodeISOArray intrinsic: narrow $len chars at $src to ISO-8859-1 bytes
// at $dst; $result receives the count produced by the stub.  Unlike
// string_compress, the SIMD temporaries are declared KILL rather than TEMP.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
16288 
16289 // ============================================================================
16290 // This name is KNOWN by the ADLC and cannot be changed.
16291 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
16292 // for this guy.
16293 instruct tlsLoadP(thread_RegP dst)
16294 %{
16295   match(Set dst (ThreadLocal));
16296 
16297   ins_cost(0);
16298 
16299   format %{ " -- \t// $dst=Thread::current(), empty" %}
16300 
16301   size(0);
16302 
16303   ins_encode( /*empty*/ );
16304 
16305   ins_pipe(pipe_class_empty);
16306 %}
16307 
16308 // ====================VECTOR INSTRUCTIONS=====================================
16309 
// Load vector (32 bits)
// ldrs into the low 32 bits of a D register; vmem4 constrains addressing
// to modes legal for a 4-byte vector access.
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
16320 
// Load vector (64 bits)
// ldrd of a full D register.
instruct loadV8(vecD dst, vmem8 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
16331 
// Load Vector (128 bits)
// ldrq of a full Q register.
instruct loadV16(vecX dst, vmem16 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}
16342 
// Store Vector (32 bits)
// strs of the low 32 bits of a D register.
instruct storeV4(vecD src, vmem4 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
16353 
// Store Vector (64 bits)
// strd of a full D register.
instruct storeV8(vecD src, vmem8 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
16364 
// Store Vector (128 bits)
// strq of a full Q register.
instruct storeV16(vecX src, vmem16 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
16375 
// Broadcast a GP-register byte into 8 lanes of a D register (the same
// rule also serves 4-lane byte vectors, per the predicate).
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
16388 
// Broadcast a GP-register byte into all 16 lanes of a Q register.
instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16400 
// Broadcast an immediate byte into 8 (or 4) lanes; only the low 8 bits
// of the constant are used.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
16413 
// Broadcast an immediate byte into all 16 lanes of a Q register.
instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
16425 
// Broadcast a GP-register short into 4 halfword lanes (also covers
// 2-lane short vectors, per the predicate).
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
16438 
// Broadcast a GP-register short into all 8 halfword lanes of a Q register.
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16450 
// Broadcast an immediate short into 4 (or 2) halfword lanes; only the
// low 16 bits of the constant are used.
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
16463 
// Broadcast an immediate short into all 8 halfword lanes of a Q register.
instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
16475 
// Broadcast a GP-register int into 2 word lanes of a D register.
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
16487 
// Broadcast a GP-register int into all 4 word lanes of a Q register.
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16499 
// Broadcast an immediate int into 2 word lanes of a D register.
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
16511 
// Broadcast an immediate int into all 4 word lanes of a Q register.
instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
16523 
// Broadcast a GP-register long into both doubleword lanes of a Q register.
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16535 
// Materialize an all-zero 128-bit vector by eor-ing the destination with
// itself (no dependency on dst's prior contents for the value produced).
// NOTE(review): this rule matches ReplicateI with an immI0 operand rather
// than ReplicateL — presumably the ideal graph represents a zero 2L
// vector this way; confirm against the matcher before changing.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
16549 
// Broadcast an FP-register float into 2 word lanes of a D register.
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}
16562 
// Broadcast an FP-register float into all 4 word lanes of a Q register.
instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}
16575 
// Broadcast an FP-register double into both doubleword lanes of a Q register.
instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
16588 
16589 // ====================REDUCTION ARITHMETIC====================================
16590 
// Add-reduce a 2-lane int vector into a scalar: extract both lanes with
// umov, then accumulate them onto the scalar seed $src1.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $dst, $src1, $tmp\n\t"
            "addw  $dst, $dst, $tmp2\t add reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16609 
// Add-reduce a 4-lane int vector: addv sums all lanes into lane 0 of a
// SIMD temp, umov moves it to a GP register, then add the scalar seed.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16627 
// Multiply-reduce a 2-lane int vector: extract each lane with umov and
// fold it into $dst by scalar multiplies, seeded with $src1.
// (Fixed: the format string previously ended with a stray "\n\t",
// leaving a dangling blank continuation line in debug listings.)
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16646 
// Multiply-reduce a 4-lane int vector: ins copies the high D half of
// $src2 over the low half of $tmp, mulv multiplies lanes pairwise
// (lane0*lane2, lane1*lane3), then the two partial products are
// extracted and folded in with scalar multiplies, seeded with $src1.
// (Fixed: the format string previously ended with a stray "\n\t",
// leaving a dangling blank continuation line in debug listings.)
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16671 
// Add-reduce a 2-lane float vector: lane 0 is added to the scalar seed,
// lane 1 is brought to lane 0 of $tmp via ins, then added.  Lane order
// is fixed, preserving strict FP addition ordering.
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16691 
// Add-reduce a 4-lane float vector, strictly in lane order 0..3: each of
// lanes 1-3 is moved to lane 0 of $tmp via ins and added scalar-wise.
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16723 
// Multiply-reduce a 2-lane float vector, in lane order: seed * lane0,
// then * lane1 (moved to lane 0 of $tmp via ins).
// (Fixed: the format's trailing label previously read "add reduction4f",
// copied from the add rule; this is a 2-lane multiply reduction.)
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16743 
// Multiply-reduce a 4-lane float vector, strictly in lane order 0..3.
// (Fixed: the format's trailing label previously read "add reduction4f",
// copied from the add rule; this is a multiply reduction.)
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16775 
// Add-reduce a 2-lane double vector, in lane order: seed + lane0, then
// + lane1 (moved to lane 0 of $tmp via ins).
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16795 
// Multiply-reduce a 2-lane double vector, in lane order: seed * lane0,
// then * lane1 (moved to lane 0 of $tmp via ins).
// (Fixed: the format's trailing label previously read "add reduction2d",
// copied from the add rule; this is a multiply reduction.)
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2d"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16815 
16816 // ====================VECTOR ARITHMETIC=======================================
16817 
16818 // --------------------------------- ADD --------------------------------------
16819 
// Vector integer add, 8 (or 4) byte lanes, 64-bit vector.
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16834 
// Vector integer add, 16 byte lanes, 128-bit vector.
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16848 
// Vector integer add, 4 (or 2) halfword lanes, 64-bit vector.
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16863 
// Vector integer add, 8 halfword lanes, 128-bit vector.
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16877 
// Vector integer add, 2 word lanes, 64-bit vector.
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16891 
16892 instruct vadd4I(vecX dst, vecX src1, vecX src2)
16893 %{
16894   predicate(n->as_Vector()->length() == 4);
16895   match(Set dst (AddVI src1 src2));
16896   ins_cost(INSN_COST);
16897   format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
16898   ins_encode %{
16899     __ addv(as_FloatRegister($dst$$reg), __ T4S,
16900             as_FloatRegister($src1$$reg),
16901             as_FloatRegister($src2$$reg));
16902   %}
16903   ins_pipe(vdop128);
16904 %}
16905 
16906 instruct vadd2L(vecX dst, vecX src1, vecX src2)
16907 %{
16908   predicate(n->as_Vector()->length() == 2);
16909   match(Set dst (AddVL src1 src2));
16910   ins_cost(INSN_COST);
16911   format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
16912   ins_encode %{
16913     __ addv(as_FloatRegister($dst$$reg), __ T2D,
16914             as_FloatRegister($src1$$reg),
16915             as_FloatRegister($src2$$reg));
16916   %}
16917   ins_pipe(vdop128);
16918 %}
16919 
16920 instruct vadd2F(vecD dst, vecD src1, vecD src2)
16921 %{
16922   predicate(n->as_Vector()->length() == 2);
16923   match(Set dst (AddVF src1 src2));
16924   ins_cost(INSN_COST);
16925   format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
16926   ins_encode %{
16927     __ fadd(as_FloatRegister($dst$$reg), __ T2S,
16928             as_FloatRegister($src1$$reg),
16929             as_FloatRegister($src2$$reg));
16930   %}
16931   ins_pipe(vdop_fp64);
16932 %}
16933 
16934 instruct vadd4F(vecX dst, vecX src1, vecX src2)
16935 %{
16936   predicate(n->as_Vector()->length() == 4);
16937   match(Set dst (AddVF src1 src2));
16938   ins_cost(INSN_COST);
16939   format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
16940   ins_encode %{
16941     __ fadd(as_FloatRegister($dst$$reg), __ T4S,
16942             as_FloatRegister($src1$$reg),
16943             as_FloatRegister($src2$$reg));
16944   %}
16945   ins_pipe(vdop_fp128);
16946 %}
16947 
// 2 x double-precision float add on a 128-bit register.
// Fix: add the length()==2 predicate that every other 2-lane vecX rule in
// this file carries (see vsub2D, vmul2D, vdiv2D); without it the matcher
// could select this rule for an AddVD of a different lane count.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16960 
16961 // --------------------------------- SUB --------------------------------------
16962 
16963 instruct vsub8B(vecD dst, vecD src1, vecD src2)
16964 %{
16965   predicate(n->as_Vector()->length() == 4 ||
16966             n->as_Vector()->length() == 8);
16967   match(Set dst (SubVB src1 src2));
16968   ins_cost(INSN_COST);
16969   format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
16970   ins_encode %{
16971     __ subv(as_FloatRegister($dst$$reg), __ T8B,
16972             as_FloatRegister($src1$$reg),
16973             as_FloatRegister($src2$$reg));
16974   %}
16975   ins_pipe(vdop64);
16976 %}
16977 
16978 instruct vsub16B(vecX dst, vecX src1, vecX src2)
16979 %{
16980   predicate(n->as_Vector()->length() == 16);
16981   match(Set dst (SubVB src1 src2));
16982   ins_cost(INSN_COST);
16983   format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
16984   ins_encode %{
16985     __ subv(as_FloatRegister($dst$$reg), __ T16B,
16986             as_FloatRegister($src1$$reg),
16987             as_FloatRegister($src2$$reg));
16988   %}
16989   ins_pipe(vdop128);
16990 %}
16991 
16992 instruct vsub4S(vecD dst, vecD src1, vecD src2)
16993 %{
16994   predicate(n->as_Vector()->length() == 2 ||
16995             n->as_Vector()->length() == 4);
16996   match(Set dst (SubVS src1 src2));
16997   ins_cost(INSN_COST);
16998   format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
16999   ins_encode %{
17000     __ subv(as_FloatRegister($dst$$reg), __ T4H,
17001             as_FloatRegister($src1$$reg),
17002             as_FloatRegister($src2$$reg));
17003   %}
17004   ins_pipe(vdop64);
17005 %}
17006 
17007 instruct vsub8S(vecX dst, vecX src1, vecX src2)
17008 %{
17009   predicate(n->as_Vector()->length() == 8);
17010   match(Set dst (SubVS src1 src2));
17011   ins_cost(INSN_COST);
17012   format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
17013   ins_encode %{
17014     __ subv(as_FloatRegister($dst$$reg), __ T8H,
17015             as_FloatRegister($src1$$reg),
17016             as_FloatRegister($src2$$reg));
17017   %}
17018   ins_pipe(vdop128);
17019 %}
17020 
17021 instruct vsub2I(vecD dst, vecD src1, vecD src2)
17022 %{
17023   predicate(n->as_Vector()->length() == 2);
17024   match(Set dst (SubVI src1 src2));
17025   ins_cost(INSN_COST);
17026   format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
17027   ins_encode %{
17028     __ subv(as_FloatRegister($dst$$reg), __ T2S,
17029             as_FloatRegister($src1$$reg),
17030             as_FloatRegister($src2$$reg));
17031   %}
17032   ins_pipe(vdop64);
17033 %}
17034 
17035 instruct vsub4I(vecX dst, vecX src1, vecX src2)
17036 %{
17037   predicate(n->as_Vector()->length() == 4);
17038   match(Set dst (SubVI src1 src2));
17039   ins_cost(INSN_COST);
17040   format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
17041   ins_encode %{
17042     __ subv(as_FloatRegister($dst$$reg), __ T4S,
17043             as_FloatRegister($src1$$reg),
17044             as_FloatRegister($src2$$reg));
17045   %}
17046   ins_pipe(vdop128);
17047 %}
17048 
17049 instruct vsub2L(vecX dst, vecX src1, vecX src2)
17050 %{
17051   predicate(n->as_Vector()->length() == 2);
17052   match(Set dst (SubVL src1 src2));
17053   ins_cost(INSN_COST);
17054   format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
17055   ins_encode %{
17056     __ subv(as_FloatRegister($dst$$reg), __ T2D,
17057             as_FloatRegister($src1$$reg),
17058             as_FloatRegister($src2$$reg));
17059   %}
17060   ins_pipe(vdop128);
17061 %}
17062 
17063 instruct vsub2F(vecD dst, vecD src1, vecD src2)
17064 %{
17065   predicate(n->as_Vector()->length() == 2);
17066   match(Set dst (SubVF src1 src2));
17067   ins_cost(INSN_COST);
17068   format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
17069   ins_encode %{
17070     __ fsub(as_FloatRegister($dst$$reg), __ T2S,
17071             as_FloatRegister($src1$$reg),
17072             as_FloatRegister($src2$$reg));
17073   %}
17074   ins_pipe(vdop_fp64);
17075 %}
17076 
17077 instruct vsub4F(vecX dst, vecX src1, vecX src2)
17078 %{
17079   predicate(n->as_Vector()->length() == 4);
17080   match(Set dst (SubVF src1 src2));
17081   ins_cost(INSN_COST);
17082   format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
17083   ins_encode %{
17084     __ fsub(as_FloatRegister($dst$$reg), __ T4S,
17085             as_FloatRegister($src1$$reg),
17086             as_FloatRegister($src2$$reg));
17087   %}
17088   ins_pipe(vdop_fp128);
17089 %}
17090 
17091 instruct vsub2D(vecX dst, vecX src1, vecX src2)
17092 %{
17093   predicate(n->as_Vector()->length() == 2);
17094   match(Set dst (SubVD src1 src2));
17095   ins_cost(INSN_COST);
17096   format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
17097   ins_encode %{
17098     __ fsub(as_FloatRegister($dst$$reg), __ T2D,
17099             as_FloatRegister($src1$$reg),
17100             as_FloatRegister($src2$$reg));
17101   %}
17102   ins_pipe(vdop_fp128);
17103 %}
17104 
17105 // --------------------------------- MUL --------------------------------------
17106 
17107 instruct vmul4S(vecD dst, vecD src1, vecD src2)
17108 %{
17109   predicate(n->as_Vector()->length() == 2 ||
17110             n->as_Vector()->length() == 4);
17111   match(Set dst (MulVS src1 src2));
17112   ins_cost(INSN_COST);
17113   format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
17114   ins_encode %{
17115     __ mulv(as_FloatRegister($dst$$reg), __ T4H,
17116             as_FloatRegister($src1$$reg),
17117             as_FloatRegister($src2$$reg));
17118   %}
17119   ins_pipe(vmul64);
17120 %}
17121 
17122 instruct vmul8S(vecX dst, vecX src1, vecX src2)
17123 %{
17124   predicate(n->as_Vector()->length() == 8);
17125   match(Set dst (MulVS src1 src2));
17126   ins_cost(INSN_COST);
17127   format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
17128   ins_encode %{
17129     __ mulv(as_FloatRegister($dst$$reg), __ T8H,
17130             as_FloatRegister($src1$$reg),
17131             as_FloatRegister($src2$$reg));
17132   %}
17133   ins_pipe(vmul128);
17134 %}
17135 
17136 instruct vmul2I(vecD dst, vecD src1, vecD src2)
17137 %{
17138   predicate(n->as_Vector()->length() == 2);
17139   match(Set dst (MulVI src1 src2));
17140   ins_cost(INSN_COST);
17141   format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
17142   ins_encode %{
17143     __ mulv(as_FloatRegister($dst$$reg), __ T2S,
17144             as_FloatRegister($src1$$reg),
17145             as_FloatRegister($src2$$reg));
17146   %}
17147   ins_pipe(vmul64);
17148 %}
17149 
17150 instruct vmul4I(vecX dst, vecX src1, vecX src2)
17151 %{
17152   predicate(n->as_Vector()->length() == 4);
17153   match(Set dst (MulVI src1 src2));
17154   ins_cost(INSN_COST);
17155   format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
17156   ins_encode %{
17157     __ mulv(as_FloatRegister($dst$$reg), __ T4S,
17158             as_FloatRegister($src1$$reg),
17159             as_FloatRegister($src2$$reg));
17160   %}
17161   ins_pipe(vmul128);
17162 %}
17163 
17164 instruct vmul2F(vecD dst, vecD src1, vecD src2)
17165 %{
17166   predicate(n->as_Vector()->length() == 2);
17167   match(Set dst (MulVF src1 src2));
17168   ins_cost(INSN_COST);
17169   format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
17170   ins_encode %{
17171     __ fmul(as_FloatRegister($dst$$reg), __ T2S,
17172             as_FloatRegister($src1$$reg),
17173             as_FloatRegister($src2$$reg));
17174   %}
17175   ins_pipe(vmuldiv_fp64);
17176 %}
17177 
17178 instruct vmul4F(vecX dst, vecX src1, vecX src2)
17179 %{
17180   predicate(n->as_Vector()->length() == 4);
17181   match(Set dst (MulVF src1 src2));
17182   ins_cost(INSN_COST);
17183   format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
17184   ins_encode %{
17185     __ fmul(as_FloatRegister($dst$$reg), __ T4S,
17186             as_FloatRegister($src1$$reg),
17187             as_FloatRegister($src2$$reg));
17188   %}
17189   ins_pipe(vmuldiv_fp128);
17190 %}
17191 
17192 instruct vmul2D(vecX dst, vecX src1, vecX src2)
17193 %{
17194   predicate(n->as_Vector()->length() == 2);
17195   match(Set dst (MulVD src1 src2));
17196   ins_cost(INSN_COST);
17197   format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
17198   ins_encode %{
17199     __ fmul(as_FloatRegister($dst$$reg), __ T2D,
17200             as_FloatRegister($src1$$reg),
17201             as_FloatRegister($src2$$reg));
17202   %}
17203   ins_pipe(vmuldiv_fp128);
17204 %}
17205 
17206 // --------------------------------- MLA --------------------------------------
17207 
17208 instruct vmla4S(vecD dst, vecD src1, vecD src2)
17209 %{
17210   predicate(n->as_Vector()->length() == 2 ||
17211             n->as_Vector()->length() == 4);
17212   match(Set dst (AddVS dst (MulVS src1 src2)));
17213   ins_cost(INSN_COST);
17214   format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
17215   ins_encode %{
17216     __ mlav(as_FloatRegister($dst$$reg), __ T4H,
17217             as_FloatRegister($src1$$reg),
17218             as_FloatRegister($src2$$reg));
17219   %}
17220   ins_pipe(vmla64);
17221 %}
17222 
17223 instruct vmla8S(vecX dst, vecX src1, vecX src2)
17224 %{
17225   predicate(n->as_Vector()->length() == 8);
17226   match(Set dst (AddVS dst (MulVS src1 src2)));
17227   ins_cost(INSN_COST);
17228   format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
17229   ins_encode %{
17230     __ mlav(as_FloatRegister($dst$$reg), __ T8H,
17231             as_FloatRegister($src1$$reg),
17232             as_FloatRegister($src2$$reg));
17233   %}
17234   ins_pipe(vmla128);
17235 %}
17236 
17237 instruct vmla2I(vecD dst, vecD src1, vecD src2)
17238 %{
17239   predicate(n->as_Vector()->length() == 2);
17240   match(Set dst (AddVI dst (MulVI src1 src2)));
17241   ins_cost(INSN_COST);
17242   format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
17243   ins_encode %{
17244     __ mlav(as_FloatRegister($dst$$reg), __ T2S,
17245             as_FloatRegister($src1$$reg),
17246             as_FloatRegister($src2$$reg));
17247   %}
17248   ins_pipe(vmla64);
17249 %}
17250 
17251 instruct vmla4I(vecX dst, vecX src1, vecX src2)
17252 %{
17253   predicate(n->as_Vector()->length() == 4);
17254   match(Set dst (AddVI dst (MulVI src1 src2)));
17255   ins_cost(INSN_COST);
17256   format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
17257   ins_encode %{
17258     __ mlav(as_FloatRegister($dst$$reg), __ T4S,
17259             as_FloatRegister($src1$$reg),
17260             as_FloatRegister($src2$$reg));
17261   %}
17262   ins_pipe(vmla128);
17263 %}
17264 
17265 // dst + src1 * src2
17266 instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
17267   predicate(UseFMA && n->as_Vector()->length() == 2);
17268   match(Set dst (FmaVF  dst (Binary src1 src2)));
17269   format %{ "fmla  $dst,$src1,$src2\t# vector (2S)" %}
17270   ins_cost(INSN_COST);
17271   ins_encode %{
17272     __ fmla(as_FloatRegister($dst$$reg), __ T2S,
17273             as_FloatRegister($src1$$reg),
17274             as_FloatRegister($src2$$reg));
17275   %}
17276   ins_pipe(vmuldiv_fp64);
17277 %}
17278 
17279 // dst + src1 * src2
17280 instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
17281   predicate(UseFMA && n->as_Vector()->length() == 4);
17282   match(Set dst (FmaVF  dst (Binary src1 src2)));
17283   format %{ "fmla  $dst,$src1,$src2\t# vector (4S)" %}
17284   ins_cost(INSN_COST);
17285   ins_encode %{
17286     __ fmla(as_FloatRegister($dst$$reg), __ T4S,
17287             as_FloatRegister($src1$$reg),
17288             as_FloatRegister($src2$$reg));
17289   %}
17290   ins_pipe(vmuldiv_fp128);
17291 %}
17292 
17293 // dst + src1 * src2
17294 instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
17295   predicate(UseFMA && n->as_Vector()->length() == 2);
17296   match(Set dst (FmaVD  dst (Binary src1 src2)));
17297   format %{ "fmla  $dst,$src1,$src2\t# vector (2D)" %}
17298   ins_cost(INSN_COST);
17299   ins_encode %{
17300     __ fmla(as_FloatRegister($dst$$reg), __ T2D,
17301             as_FloatRegister($src1$$reg),
17302             as_FloatRegister($src2$$reg));
17303   %}
17304   ins_pipe(vmuldiv_fp128);
17305 %}
17306 
17307 // --------------------------------- MLS --------------------------------------
17308 
17309 instruct vmls4S(vecD dst, vecD src1, vecD src2)
17310 %{
17311   predicate(n->as_Vector()->length() == 2 ||
17312             n->as_Vector()->length() == 4);
17313   match(Set dst (SubVS dst (MulVS src1 src2)));
17314   ins_cost(INSN_COST);
17315   format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
17316   ins_encode %{
17317     __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
17318             as_FloatRegister($src1$$reg),
17319             as_FloatRegister($src2$$reg));
17320   %}
17321   ins_pipe(vmla64);
17322 %}
17323 
17324 instruct vmls8S(vecX dst, vecX src1, vecX src2)
17325 %{
17326   predicate(n->as_Vector()->length() == 8);
17327   match(Set dst (SubVS dst (MulVS src1 src2)));
17328   ins_cost(INSN_COST);
17329   format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
17330   ins_encode %{
17331     __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
17332             as_FloatRegister($src1$$reg),
17333             as_FloatRegister($src2$$reg));
17334   %}
17335   ins_pipe(vmla128);
17336 %}
17337 
17338 instruct vmls2I(vecD dst, vecD src1, vecD src2)
17339 %{
17340   predicate(n->as_Vector()->length() == 2);
17341   match(Set dst (SubVI dst (MulVI src1 src2)));
17342   ins_cost(INSN_COST);
17343   format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
17344   ins_encode %{
17345     __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
17346             as_FloatRegister($src1$$reg),
17347             as_FloatRegister($src2$$reg));
17348   %}
17349   ins_pipe(vmla64);
17350 %}
17351 
17352 instruct vmls4I(vecX dst, vecX src1, vecX src2)
17353 %{
17354   predicate(n->as_Vector()->length() == 4);
17355   match(Set dst (SubVI dst (MulVI src1 src2)));
17356   ins_cost(INSN_COST);
17357   format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
17358   ins_encode %{
17359     __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
17360             as_FloatRegister($src1$$reg),
17361             as_FloatRegister($src2$$reg));
17362   %}
17363   ins_pipe(vmla128);
17364 %}
17365 
17366 // dst - src1 * src2
17367 instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
17368   predicate(UseFMA && n->as_Vector()->length() == 2);
17369   match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
17370   match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
17371   format %{ "fmls  $dst,$src1,$src2\t# vector (2S)" %}
17372   ins_cost(INSN_COST);
17373   ins_encode %{
17374     __ fmls(as_FloatRegister($dst$$reg), __ T2S,
17375             as_FloatRegister($src1$$reg),
17376             as_FloatRegister($src2$$reg));
17377   %}
17378   ins_pipe(vmuldiv_fp64);
17379 %}
17380 
17381 // dst - src1 * src2
17382 instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
17383   predicate(UseFMA && n->as_Vector()->length() == 4);
17384   match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
17385   match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
17386   format %{ "fmls  $dst,$src1,$src2\t# vector (4S)" %}
17387   ins_cost(INSN_COST);
17388   ins_encode %{
17389     __ fmls(as_FloatRegister($dst$$reg), __ T4S,
17390             as_FloatRegister($src1$$reg),
17391             as_FloatRegister($src2$$reg));
17392   %}
17393   ins_pipe(vmuldiv_fp128);
17394 %}
17395 
17396 // dst - src1 * src2
17397 instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
17398   predicate(UseFMA && n->as_Vector()->length() == 2);
17399   match(Set dst (FmaVD  dst (Binary (NegVD src1) src2)));
17400   match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
17401   format %{ "fmls  $dst,$src1,$src2\t# vector (2D)" %}
17402   ins_cost(INSN_COST);
17403   ins_encode %{
17404     __ fmls(as_FloatRegister($dst$$reg), __ T2D,
17405             as_FloatRegister($src1$$reg),
17406             as_FloatRegister($src2$$reg));
17407   %}
17408   ins_pipe(vmuldiv_fp128);
17409 %}
17410 
17411 // --------------------------------- DIV --------------------------------------
17412 
17413 instruct vdiv2F(vecD dst, vecD src1, vecD src2)
17414 %{
17415   predicate(n->as_Vector()->length() == 2);
17416   match(Set dst (DivVF src1 src2));
17417   ins_cost(INSN_COST);
17418   format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
17419   ins_encode %{
17420     __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
17421             as_FloatRegister($src1$$reg),
17422             as_FloatRegister($src2$$reg));
17423   %}
17424   ins_pipe(vmuldiv_fp64);
17425 %}
17426 
17427 instruct vdiv4F(vecX dst, vecX src1, vecX src2)
17428 %{
17429   predicate(n->as_Vector()->length() == 4);
17430   match(Set dst (DivVF src1 src2));
17431   ins_cost(INSN_COST);
17432   format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
17433   ins_encode %{
17434     __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
17435             as_FloatRegister($src1$$reg),
17436             as_FloatRegister($src2$$reg));
17437   %}
17438   ins_pipe(vmuldiv_fp128);
17439 %}
17440 
17441 instruct vdiv2D(vecX dst, vecX src1, vecX src2)
17442 %{
17443   predicate(n->as_Vector()->length() == 2);
17444   match(Set dst (DivVD src1 src2));
17445   ins_cost(INSN_COST);
17446   format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
17447   ins_encode %{
17448     __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
17449             as_FloatRegister($src1$$reg),
17450             as_FloatRegister($src2$$reg));
17451   %}
17452   ins_pipe(vmuldiv_fp128);
17453 %}
17454 
17455 // --------------------------------- SQRT -------------------------------------
17456 
17457 instruct vsqrt2D(vecX dst, vecX src)
17458 %{
17459   predicate(n->as_Vector()->length() == 2);
17460   match(Set dst (SqrtVD src));
17461   format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
17462   ins_encode %{
17463     __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
17464              as_FloatRegister($src$$reg));
17465   %}
17466   ins_pipe(vsqrt_fp128);
17467 %}
17468 
17469 // --------------------------------- ABS --------------------------------------
17470 
17471 instruct vabs2F(vecD dst, vecD src)
17472 %{
17473   predicate(n->as_Vector()->length() == 2);
17474   match(Set dst (AbsVF src));
17475   ins_cost(INSN_COST * 3);
17476   format %{ "fabs  $dst,$src\t# vector (2S)" %}
17477   ins_encode %{
17478     __ fabs(as_FloatRegister($dst$$reg), __ T2S,
17479             as_FloatRegister($src$$reg));
17480   %}
17481   ins_pipe(vunop_fp64);
17482 %}
17483 
17484 instruct vabs4F(vecX dst, vecX src)
17485 %{
17486   predicate(n->as_Vector()->length() == 4);
17487   match(Set dst (AbsVF src));
17488   ins_cost(INSN_COST * 3);
17489   format %{ "fabs  $dst,$src\t# vector (4S)" %}
17490   ins_encode %{
17491     __ fabs(as_FloatRegister($dst$$reg), __ T4S,
17492             as_FloatRegister($src$$reg));
17493   %}
17494   ins_pipe(vunop_fp128);
17495 %}
17496 
17497 instruct vabs2D(vecX dst, vecX src)
17498 %{
17499   predicate(n->as_Vector()->length() == 2);
17500   match(Set dst (AbsVD src));
17501   ins_cost(INSN_COST * 3);
17502   format %{ "fabs  $dst,$src\t# vector (2D)" %}
17503   ins_encode %{
17504     __ fabs(as_FloatRegister($dst$$reg), __ T2D,
17505             as_FloatRegister($src$$reg));
17506   %}
17507   ins_pipe(vunop_fp128);
17508 %}
17509 
17510 // --------------------------------- NEG --------------------------------------
17511 
17512 instruct vneg2F(vecD dst, vecD src)
17513 %{
17514   predicate(n->as_Vector()->length() == 2);
17515   match(Set dst (NegVF src));
17516   ins_cost(INSN_COST * 3);
17517   format %{ "fneg  $dst,$src\t# vector (2S)" %}
17518   ins_encode %{
17519     __ fneg(as_FloatRegister($dst$$reg), __ T2S,
17520             as_FloatRegister($src$$reg));
17521   %}
17522   ins_pipe(vunop_fp64);
17523 %}
17524 
17525 instruct vneg4F(vecX dst, vecX src)
17526 %{
17527   predicate(n->as_Vector()->length() == 4);
17528   match(Set dst (NegVF src));
17529   ins_cost(INSN_COST * 3);
17530   format %{ "fneg  $dst,$src\t# vector (4S)" %}
17531   ins_encode %{
17532     __ fneg(as_FloatRegister($dst$$reg), __ T4S,
17533             as_FloatRegister($src$$reg));
17534   %}
17535   ins_pipe(vunop_fp128);
17536 %}
17537 
17538 instruct vneg2D(vecX dst, vecX src)
17539 %{
17540   predicate(n->as_Vector()->length() == 2);
17541   match(Set dst (NegVD src));
17542   ins_cost(INSN_COST * 3);
17543   format %{ "fneg  $dst,$src\t# vector (2D)" %}
17544   ins_encode %{
17545     __ fneg(as_FloatRegister($dst$$reg), __ T2D,
17546             as_FloatRegister($src$$reg));
17547   %}
17548   ins_pipe(vunop_fp128);
17549 %}
17550 
17551 // --------------------------------- AND --------------------------------------
17552 
17553 instruct vand8B(vecD dst, vecD src1, vecD src2)
17554 %{
17555   predicate(n->as_Vector()->length_in_bytes() == 4 ||
17556             n->as_Vector()->length_in_bytes() == 8);
17557   match(Set dst (AndV src1 src2));
17558   ins_cost(INSN_COST);
17559   format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
17560   ins_encode %{
17561     __ andr(as_FloatRegister($dst$$reg), __ T8B,
17562             as_FloatRegister($src1$$reg),
17563             as_FloatRegister($src2$$reg));
17564   %}
17565   ins_pipe(vlogical64);
17566 %}
17567 
17568 instruct vand16B(vecX dst, vecX src1, vecX src2)
17569 %{
17570   predicate(n->as_Vector()->length_in_bytes() == 16);
17571   match(Set dst (AndV src1 src2));
17572   ins_cost(INSN_COST);
17573   format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
17574   ins_encode %{
17575     __ andr(as_FloatRegister($dst$$reg), __ T16B,
17576             as_FloatRegister($src1$$reg),
17577             as_FloatRegister($src2$$reg));
17578   %}
17579   ins_pipe(vlogical128);
17580 %}
17581 
17582 // --------------------------------- OR ---------------------------------------
17583 
// Bitwise OR of 4- or 8-byte vectors.
// Fix: the format string previously printed "and" although the rule emits
// orr — it now matches the instruction (compare vor16B below).
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
17598 
// Bitwise OR of 16-byte vectors.
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17612 
17613 // --------------------------------- XOR --------------------------------------
17614 
17615 instruct vxor8B(vecD dst, vecD src1, vecD src2)
17616 %{
17617   predicate(n->as_Vector()->length_in_bytes() == 4 ||
17618             n->as_Vector()->length_in_bytes() == 8);
17619   match(Set dst (XorV src1 src2));
17620   ins_cost(INSN_COST);
17621   format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
17622   ins_encode %{
17623     __ eor(as_FloatRegister($dst$$reg), __ T8B,
17624             as_FloatRegister($src1$$reg),
17625             as_FloatRegister($src2$$reg));
17626   %}
17627   ins_pipe(vlogical64);
17628 %}
17629 
17630 instruct vxor16B(vecX dst, vecX src1, vecX src2)
17631 %{
17632   predicate(n->as_Vector()->length_in_bytes() == 16);
17633   match(Set dst (XorV src1 src2));
17634   ins_cost(INSN_COST);
17635   format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
17636   ins_encode %{
17637     __ eor(as_FloatRegister($dst$$reg), __ T16B,
17638             as_FloatRegister($src1$$reg),
17639             as_FloatRegister($src2$$reg));
17640   %}
17641   ins_pipe(vlogical128);
17642 %}
17643 
17644 // ------------------------------ Shift ---------------------------------------
17645 
17646 instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
17647   match(Set dst (LShiftCntV cnt));
17648   format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
17649   ins_encode %{
17650     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
17651   %}
17652   ins_pipe(vdup_reg_reg128);
17653 %}
17654 
17655 // Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
17656 instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
17657   match(Set dst (RShiftCntV cnt));
17658   format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
17659   ins_encode %{
17660     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
17661     __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
17662   %}
17663   ins_pipe(vdup_reg_reg128);
17664 %}
17665 
17666 instruct vsll8B(vecD dst, vecD src, vecX shift) %{
17667   predicate(n->as_Vector()->length() == 4 ||
17668             n->as_Vector()->length() == 8);
17669   match(Set dst (LShiftVB src shift));
17670   match(Set dst (RShiftVB src shift));
17671   ins_cost(INSN_COST);
17672   format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
17673   ins_encode %{
17674     __ sshl(as_FloatRegister($dst$$reg), __ T8B,
17675             as_FloatRegister($src$$reg),
17676             as_FloatRegister($shift$$reg));
17677   %}
17678   ins_pipe(vshift64);
17679 %}
17680 
17681 instruct vsll16B(vecX dst, vecX src, vecX shift) %{
17682   predicate(n->as_Vector()->length() == 16);
17683   match(Set dst (LShiftVB src shift));
17684   match(Set dst (RShiftVB src shift));
17685   ins_cost(INSN_COST);
17686   format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
17687   ins_encode %{
17688     __ sshl(as_FloatRegister($dst$$reg), __ T16B,
17689             as_FloatRegister($src$$reg),
17690             as_FloatRegister($shift$$reg));
17691   %}
17692   ins_pipe(vshift128);
17693 %}
17694 
17695 instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
17696   predicate(n->as_Vector()->length() == 4 ||
17697             n->as_Vector()->length() == 8);
17698   match(Set dst (URShiftVB src shift));
17699   ins_cost(INSN_COST);
17700   format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
17701   ins_encode %{
17702     __ ushl(as_FloatRegister($dst$$reg), __ T8B,
17703             as_FloatRegister($src$$reg),
17704             as_FloatRegister($shift$$reg));
17705   %}
17706   ins_pipe(vshift64);
17707 %}
17708 
17709 instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
17710   predicate(n->as_Vector()->length() == 16);
17711   match(Set dst (URShiftVB src shift));
17712   ins_cost(INSN_COST);
17713   format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
17714   ins_encode %{
17715     __ ushl(as_FloatRegister($dst$$reg), __ T16B,
17716             as_FloatRegister($src$$reg),
17717             as_FloatRegister($shift$$reg));
17718   %}
17719   ins_pipe(vshift128);
17720 %}
17721 
17722 instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
17723   predicate(n->as_Vector()->length() == 4 ||
17724             n->as_Vector()->length() == 8);
17725   match(Set dst (LShiftVB src shift));
17726   ins_cost(INSN_COST);
17727   format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
17728   ins_encode %{
17729     int sh = (int)$shift$$constant;
17730     if (sh >= 8) {
17731       __ eor(as_FloatRegister($dst$$reg), __ T8B,
17732              as_FloatRegister($src$$reg),
17733              as_FloatRegister($src$$reg));
17734     } else {
17735       __ shl(as_FloatRegister($dst$$reg), __ T8B,
17736              as_FloatRegister($src$$reg), sh);
17737     }
17738   %}
17739   ins_pipe(vshift64_imm);
17740 %}
17741 
17742 instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
17743   predicate(n->as_Vector()->length() == 16);
17744   match(Set dst (LShiftVB src shift));
17745   ins_cost(INSN_COST);
17746   format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
17747   ins_encode %{
17748     int sh = (int)$shift$$constant;
17749     if (sh >= 8) {
17750       __ eor(as_FloatRegister($dst$$reg), __ T16B,
17751              as_FloatRegister($src$$reg),
17752              as_FloatRegister($src$$reg));
17753     } else {
17754       __ shl(as_FloatRegister($dst$$reg), __ T16B,
17755              as_FloatRegister($src$$reg), sh);
17756     }
17757   %}
17758   ins_pipe(vshift128_imm);
17759 %}
17760 
17761 instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
17762   predicate(n->as_Vector()->length() == 4 ||
17763             n->as_Vector()->length() == 8);
17764   match(Set dst (RShiftVB src shift));
17765   ins_cost(INSN_COST);
17766   format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
17767   ins_encode %{
17768     int sh = (int)$shift$$constant;
17769     if (sh >= 8) sh = 7;
17770     __ sshr(as_FloatRegister($dst$$reg), __ T8B,
17771            as_FloatRegister($src$$reg), sh);
17772   %}
17773   ins_pipe(vshift64_imm);
17774 %}
17775 
// Vector arithmetic right shift, 16 byte lanes (128-bit), immediate count.
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // Clamp: arithmetic shift by >= 8 equals shift by 7 (sign fill).
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
17789 
// Vector logical right shift, 4 or 8 byte lanes (64-bit), immediate count.
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Logical shift by >= lane width yields zero; USHR cannot encode
      // such a count for B lanes, so zero dst via eor(src, src).
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
17809 
// Vector logical right shift, 16 byte lanes (128-bit), immediate count.
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Shift by >= lane width: result is zero; produce it with
      // eor(src, src) since USHR cannot encode the count.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17828 
// Vector shift, 2 or 4 short lanes (64-bit), per-lane register count.
// SSHL shifts each lane left by its (signed) count; a negative count
// shifts right, which is why RShiftVS also matches here -- assumes the
// shift-count vector was negated upstream for the right-shift case
// (produced by the shift-count rules elsewhere in this file; TODO confirm).
instruct vsll4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
17843 
// Vector shift, 8 short lanes (128-bit), per-lane register count.
// SSHL: negative per-lane counts shift right, hence the RShiftVS match
// -- assumes the count vector is negated upstream for right shifts.
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17857 
// Vector logical right shift, 2 or 4 short lanes, per-lane register count.
// USHL shifts right for negative counts -- assumes the count vector is
// negated upstream (TODO confirm against the shift-count rules).
instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
17871 
// Vector logical right shift, 8 short lanes (128-bit), per-lane register
// count; USHL with a (presumably upstream-negated) count vector.
instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17884 
// Vector left shift, 2 or 4 short lanes (64-bit), immediate count.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Left shift by >= lane width zeroes every lane; SHL cannot
      // encode such a count for H lanes. The eor arrangement (T8B)
      // is irrelevant for a bitwise self-eor that just zeroes dst.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
17904 
// Vector left shift, 8 short lanes (128-bit), immediate count.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Shift by >= 16 on H lanes: result is zero; zero dst with a
      // self-eor since SHL cannot encode the count.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17923 
// Vector arithmetic right shift, 2 or 4 short lanes, immediate count.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // Clamp: arithmetic shift by >= 16 equals shift by 15 (sign fill).
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
17938 
// Vector arithmetic right shift, 8 short lanes (128-bit), immediate count.
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // Clamp: arithmetic shift by >= 16 equals shift by 15 (sign fill).
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
17952 
// Vector logical right shift, 2 or 4 short lanes, immediate count.
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Logical shift by >= lane width yields zero; zero dst via
      // self-eor since USHR cannot encode the count for H lanes.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
17972 
// Vector logical right shift, 8 short lanes (128-bit), immediate count.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Shift by >= 16 on H lanes: result is zero; zero via self-eor.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17991 
// Vector shift, 2 int lanes (64-bit), per-lane register count.
// SSHL: negative counts shift right, hence the RShiftVI match --
// assumes the count vector is negated upstream for right shifts.
instruct vsll2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
18005 
// Vector shift, 4 int lanes (128-bit), per-lane register count.
// SSHL: negative counts shift right, hence the RShiftVI match --
// assumes the count vector is negated upstream for right shifts.
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
18019 
// Vector logical right shift, 2 int lanes, per-lane register count;
// USHL with a (presumably upstream-negated) count vector.
instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
18032 
// Vector logical right shift, 4 int lanes (128-bit), per-lane register
// count; USHL with a (presumably upstream-negated) count vector.
instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
18045 
// Vector left shift, 2 int lanes, immediate count. No out-of-range
// handling here (unlike the B/H variants) -- presumably the count is
// already < 32, matching Java's int shift masking; TODO confirm.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
18058 
// Vector left shift, 4 int lanes (128-bit), immediate count.
// Count is assumed in range for S lanes (< 32) -- TODO confirm.
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18071 
// Vector arithmetic right shift, 2 int lanes, immediate count
// (assumed in range for S lanes -- TODO confirm).
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
18084 
// Vector arithmetic right shift, 4 int lanes (128-bit), immediate count
// (assumed in range for S lanes -- TODO confirm).
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18097 
// Vector logical right shift, 2 int lanes, immediate count
// (assumed in range for S lanes -- TODO confirm).
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
18110 
// Vector logical right shift, 4 int lanes (128-bit), immediate count
// (assumed in range for S lanes -- TODO confirm).
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18123 
// Vector shift, 2 long lanes (128-bit), per-lane register count.
// SSHL: negative counts shift right, hence the RShiftVL match --
// assumes the count vector is negated upstream for right shifts.
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
18137 
// Vector logical right shift, 2 long lanes (128-bit), per-lane register
// count; USHL with a (presumably upstream-negated) count vector.
instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
18150 
// Vector left shift, 2 long lanes (128-bit), immediate count
// (assumed in range for D lanes, i.e. < 64 -- TODO confirm).
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18163 
// Vector arithmetic right shift, 2 long lanes (128-bit), immediate count
// (assumed in range for D lanes -- TODO confirm).
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18176 
// Vector logical right shift, 2 long lanes (128-bit), immediate count
// (assumed in range for D lanes -- TODO confirm).
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18189 
18190 //----------PEEPHOLE RULES-----------------------------------------------------
18191 // These must follow all instruction definitions as they use the names
18192 // defined in the instructions definitions.
18193 //
18194 // peepmatch ( root_instr_name [preceding_instruction]* );
18195 //
18196 // peepconstraint %{
18197 // (instruction_number.operand_name relational_op instruction_number.operand_name
18198 //  [, ...] );
18199 // // instruction numbers are zero-based using left to right order in peepmatch
18200 //
18201 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
18202 // // provide an instruction_number.operand_name for each operand that appears
18203 // // in the replacement instruction's match rule
18204 //
18205 // ---------VM FLAGS---------------------------------------------------------
18206 //
18207 // All peephole optimizations can be turned off using -XX:-OptoPeephole
18208 //
18209 // Each peephole rule is given an identifying number starting with zero and
18210 // increasing by one in the order seen by the parser.  An individual peephole
18211 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
18212 // on the command-line.
18213 //
18214 // ---------CURRENT LIMITATIONS----------------------------------------------
18215 //
18216 // Only match adjacent instructions in same basic block
18217 // Only equality constraints
18218 // Only constraints between operands, not (0.dest_reg == RAX_enc)
18219 // Only one replacement instruction
18220 //
18221 // ---------EXAMPLE----------------------------------------------------------
18222 //
18223 // // pertinent parts of existing instructions in architecture description
18224 // instruct movI(iRegINoSp dst, iRegI src)
18225 // %{
18226 //   match(Set dst (CopyI src));
18227 // %}
18228 //
18229 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
18230 // %{
18231 //   match(Set dst (AddI dst src));
18232 //   effect(KILL cr);
18233 // %}
18234 //
18235 // // Change (inc mov) to lea
18236 // peephole %{
//   // increment preceded by register-register move
18238 //   peepmatch ( incI_iReg movI );
18239 //   // require that the destination register of the increment
18240 //   // match the destination register of the move
18241 //   peepconstraint ( 0.dst == 1.dst );
18242 //   // construct a replacement instruction that sets
18243 //   // the destination to ( move's source register + one )
18244 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
18245 // %}
18246 //
18247 
18248 // Implementation no longer uses movX instructions since
18249 // machine-independent system no longer uses CopyX nodes.
18250 //
18251 // peephole
18252 // %{
18253 //   peepmatch (incI_iReg movI);
18254 //   peepconstraint (0.dst == 1.dst);
18255 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18256 // %}
18257 
18258 // peephole
18259 // %{
18260 //   peepmatch (decI_iReg movI);
18261 //   peepconstraint (0.dst == 1.dst);
18262 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18263 // %}
18264 
18265 // peephole
18266 // %{
18267 //   peepmatch (addI_iReg_imm movI);
18268 //   peepconstraint (0.dst == 1.dst);
18269 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18270 // %}
18271 
18272 // peephole
18273 // %{
18274 //   peepmatch (incL_iReg movL);
18275 //   peepconstraint (0.dst == 1.dst);
18276 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18277 // %}
18278 
18279 // peephole
18280 // %{
18281 //   peepmatch (decL_iReg movL);
18282 //   peepconstraint (0.dst == 1.dst);
18283 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18284 // %}
18285 
18286 // peephole
18287 // %{
18288 //   peepmatch (addL_iReg_imm movL);
18289 //   peepconstraint (0.dst == 1.dst);
18290 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18291 // %}
18292 
18293 // peephole
18294 // %{
18295 //   peepmatch (addP_iReg_imm movP);
18296 //   peepconstraint (0.dst == 1.dst);
18297 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
18298 // %}
18299 
18300 // // Change load of spilled value to only a spill
18301 // instruct storeI(memory mem, iRegI src)
18302 // %{
18303 //   match(Set mem (StoreI mem src));
18304 // %}
18305 //
18306 // instruct loadI(iRegINoSp dst, memory mem)
18307 // %{
18308 //   match(Set dst (LoadI mem));
18309 // %}
18310 //
18311 
18312 //----------SMARTSPILL RULES---------------------------------------------------
18313 // These must follow all instruction definitions as they use the names
18314 // defined in the instructions definitions.
18315 
18316 // Local Variables:
18317 // mode: c++
18318 // End: