1 //
    2 // Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
    3 // Copyright (c) 2014, 2020, Red Hat, Inc. All rights reserved.
    4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    5 //
    6 // This code is free software; you can redistribute it and/or modify it
    7 // under the terms of the GNU General Public License version 2 only, as
    8 // published by the Free Software Foundation.
    9 //
   10 // This code is distributed in the hope that it will be useful, but WITHOUT
   11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   13 // version 2 for more details (a copy is included in the LICENSE file that
   14 // accompanied this code).
   15 //
   16 // You should have received a copy of the GNU General Public License version
   17 // 2 along with this work; if not, write to the Free Software Foundation,
   18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   19 //
   20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   21 // or visit www.oracle.com if you need additional information or have any
   22 // questions.
   23 //
   24 //
   25 
   26 // AArch64 Architecture Description File
   27 
   28 //----------REGISTER DEFINITION BLOCK------------------------------------------
   29 // This information is used by the matcher and the register allocator to
   30 // describe individual registers and classes of registers within the target
// architecture.
   32 
   33 register %{
   34 //----------Architecture Description Register Definitions----------------------
   35 // General Registers
   36 // "reg_def"  name ( register save type, C convention save type,
   37 //                   ideal register type, encoding );
   38 // Register Save Types:
   39 //
   40 // NS  = No-Save:       The register allocator assumes that these registers
   41 //                      can be used without saving upon entry to the method, &
   42 //                      that they do not need to be saved at call sites.
   43 //
   44 // SOC = Save-On-Call:  The register allocator assumes that these registers
   45 //                      can be used without saving upon entry to the method,
   46 //                      but that they must be saved at call sites.
   47 //
   48 // SOE = Save-On-Entry: The register allocator assumes that these registers
   49 //                      must be saved before using them upon entry to the
   50 //                      method, but they do not need to be saved at call
   51 //                      sites.
   52 //
   53 // AS  = Always-Save:   The register allocator assumes that these registers
   54 //                      must be saved before using them upon entry to the
   55 //                      method, & that they must be saved at call sites.
   56 //
   57 // Ideal Register Type is used to determine how to save & restore a
   58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
   59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
   60 //
   61 // The encoding number is the actual bit-pattern placed into the opcodes.
   62 
   63 // We must define the 64 bit int registers in two 32 bit halves, the
   64 // real lower register and a virtual upper half register. upper halves
   65 // are used by the register allocator but are not actually supplied as
   66 // operands to memory ops.
   67 //
   68 // follow the C1 compiler in making registers
   69 //
   70 //   r0-r7,r10-r26 volatile (caller save)
   71 //   r27-r32 system (no save, no allocate)
   72 //   r8-r9 non-allocatable (so we can use them as scratch regs)
   73 //
   74 // as regards Java usage. we don't use any callee save registers
   75 // because this makes it difficult to de-optimise a frame (see comment
   76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
   77 //
   78 
   79 // General Registers
   80 
// 64-bit integer registers, each defined as two 32-bit ADLC halves:
// the real low half (Rn) plus a virtual high half (Rn_H) that is used
// by the register allocator but never supplied as a memory operand
// (see notes above).

// r0..r7: argument registers, volatile (save-on-call)
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
// r8-r9: non-allocatable, kept free for use as scratch registers
reg_def R8      ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()         ); // rscratch1
reg_def R8_H    ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()->next() );
reg_def R9      ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()         ); // rscratch2
reg_def R9_H    ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()->next() );
// r10..r18: volatile temporaries
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
// r19..r26: SOC for Java (no callee saves, see note above) but
// save-on-entry for the C calling convention
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// r27..r31: system registers with dedicated roles; not allocated
// (placed last in alloc_class chunk0 below)
reg_def R27     ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
  145 
  146 // ----------------------------
  147 // Float/Double/Vector Registers
  148 // ----------------------------
  149 
  150 // Double Registers
  151 
  152 // The rules of ADL require that double registers be defined in pairs.
  153 // Each pair must be two 32-bit values, but not necessarily a pair of
  154 // single float registers. In each pair, ADLC-assigned register numbers
  155 // must be adjacent, with the lower number even. Finally, when the
  156 // CPU stores such a register pair to memory, the word associated with
  157 // the lower ADLC-assigned number must be stored to the lower address.
  158 
  159 // AArch64 has 32 floating-point registers. Each can store a vector of
  160 // single or double precision floating-point values up to 8 * 32
  161 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
  162 // use the first float or double element of the vector.
  163 
// For Java use, float registers v0-v15 are always save-on-call,
// whereas the platform ABI treats v8-v15 as callee-save. Float
// registers v16-v31 are SOC as per the platform spec.
  167 
// For SVE vector registers, we simply extend the vector register size
// to 8 slots. A vector register with only the lower 4 slots defined
// denotes a 128-bit NEON vector register, while a vector register
// with the whole 8 slots indicates an SVE vector register with a
// vector size >= 128 bits (128 ~ 2048 bits, a multiple of 128 bits).
// A 128-bit SVE vector register also has 8 slots, but the actual size
// is 128 bits, the same as a NEON vector register.
  175 
  // 32 vector registers v0..v31. Each is described by eight 32-bit
  // ADLC slots: Vn plus Vn_H, Vn_J .. Vn_O, mapping to
  // as_VMReg()->next(1..7). The lower four slots cover a 128-bit NEON
  // register; all eight are used for SVE (see notes above).

  // v0..v7: argument registers, save-on-call
  reg_def V0   ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(3) );
  reg_def V0_L ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(4) );
  reg_def V0_M ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(5) );
  reg_def V0_N ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(6) );
  reg_def V0_O ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(7) );

  reg_def V1   ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(3) );
  reg_def V1_L ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(4) );
  reg_def V1_M ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(5) );
  reg_def V1_N ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(6) );
  reg_def V1_O ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(7) );

  reg_def V2   ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(3) );
  reg_def V2_L ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(4) );
  reg_def V2_M ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(5) );
  reg_def V2_N ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(6) );
  reg_def V2_O ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(7) );

  reg_def V3   ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(3) );
  reg_def V3_L ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(4) );
  reg_def V3_M ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(5) );
  reg_def V3_N ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(6) );
  reg_def V3_O ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(7) );

  reg_def V4   ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(3) );
  reg_def V4_L ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(4) );
  reg_def V4_M ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(5) );
  reg_def V4_N ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(6) );
  reg_def V4_O ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(7) );

  reg_def V5   ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(3) );
  reg_def V5_L ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(4) );
  reg_def V5_M ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(5) );
  reg_def V5_N ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(6) );
  reg_def V5_O ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(7) );

  reg_def V6   ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(3) );
  reg_def V6_L ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(4) );
  reg_def V6_M ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(5) );
  reg_def V6_N ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(6) );
  reg_def V6_O ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(7) );

  reg_def V7   ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(3) );
  reg_def V7_L ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(4) );
  reg_def V7_M ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(5) );
  reg_def V7_N ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(6) );
  reg_def V7_O ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(7) );

  // v8..v15: SOC for Java even though the platform ABI treats them as
  // callee-save (see notes above)
  reg_def V8   ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(3) );
  reg_def V8_L ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(4) );
  reg_def V8_M ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(5) );
  reg_def V8_N ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(6) );
  reg_def V8_O ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(7) );

  reg_def V9   ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(3) );
  reg_def V9_L ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(4) );
  reg_def V9_M ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(5) );
  reg_def V9_N ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(6) );
  reg_def V9_O ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(7) );

  reg_def V10   ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()          );
  reg_def V10_H ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next()  );
  reg_def V10_J ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2) );
  reg_def V10_K ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3) );
  reg_def V10_L ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(4) );
  reg_def V10_M ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(5) );
  reg_def V10_N ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(6) );
  reg_def V10_O ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(7) );

  reg_def V11   ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()          );
  reg_def V11_H ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next()  );
  reg_def V11_J ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2) );
  reg_def V11_K ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3) );
  reg_def V11_L ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(4) );
  reg_def V11_M ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(5) );
  reg_def V11_N ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(6) );
  reg_def V11_O ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(7) );

  reg_def V12   ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()          );
  reg_def V12_H ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next()  );
  reg_def V12_J ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2) );
  reg_def V12_K ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3) );
  reg_def V12_L ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(4) );
  reg_def V12_M ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(5) );
  reg_def V12_N ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(6) );
  reg_def V12_O ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(7) );

  reg_def V13   ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()          );
  reg_def V13_H ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next()  );
  reg_def V13_J ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2) );
  reg_def V13_K ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3) );
  reg_def V13_L ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(4) );
  reg_def V13_M ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(5) );
  reg_def V13_N ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(6) );
  reg_def V13_O ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(7) );

  reg_def V14   ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()          );
  reg_def V14_H ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next()  );
  reg_def V14_J ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2) );
  reg_def V14_K ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3) );
  reg_def V14_L ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(4) );
  reg_def V14_M ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(5) );
  reg_def V14_N ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(6) );
  reg_def V14_O ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(7) );

  reg_def V15   ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()          );
  reg_def V15_H ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next()  );
  reg_def V15_J ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2) );
  reg_def V15_K ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3) );
  reg_def V15_L ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(4) );
  reg_def V15_M ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(5) );
  reg_def V15_N ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(6) );
  reg_def V15_O ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(7) );

  // v16..v31: save-on-call per the platform spec (see notes above)
  reg_def V16   ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()          );
  reg_def V16_H ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next()  );
  reg_def V16_J ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2) );
  reg_def V16_K ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3) );
  reg_def V16_L ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(4) );
  reg_def V16_M ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(5) );
  reg_def V16_N ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(6) );
  reg_def V16_O ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(7) );

  reg_def V17   ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()          );
  reg_def V17_H ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next()  );
  reg_def V17_J ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2) );
  reg_def V17_K ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3) );
  reg_def V17_L ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(4) );
  reg_def V17_M ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(5) );
  reg_def V17_N ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(6) );
  reg_def V17_O ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(7) );

  reg_def V18   ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()          );
  reg_def V18_H ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next()  );
  reg_def V18_J ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2) );
  reg_def V18_K ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3) );
  reg_def V18_L ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(4) );
  reg_def V18_M ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(5) );
  reg_def V18_N ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(6) );
  reg_def V18_O ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(7) );

  reg_def V19   ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()          );
  reg_def V19_H ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next()  );
  reg_def V19_J ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2) );
  reg_def V19_K ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3) );
  reg_def V19_L ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(4) );
  reg_def V19_M ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(5) );
  reg_def V19_N ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(6) );
  reg_def V19_O ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(7) );

  reg_def V20   ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()          );
  reg_def V20_H ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next()  );
  reg_def V20_J ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2) );
  reg_def V20_K ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3) );
  reg_def V20_L ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(4) );
  reg_def V20_M ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(5) );
  reg_def V20_N ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(6) );
  reg_def V20_O ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(7) );

  reg_def V21   ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()          );
  reg_def V21_H ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next()  );
  reg_def V21_J ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2) );
  reg_def V21_K ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3) );
  reg_def V21_L ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(4) );
  reg_def V21_M ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(5) );
  reg_def V21_N ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(6) );
  reg_def V21_O ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(7) );

  reg_def V22   ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()          );
  reg_def V22_H ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next()  );
  reg_def V22_J ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2) );
  reg_def V22_K ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3) );
  reg_def V22_L ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(4) );
  reg_def V22_M ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(5) );
  reg_def V22_N ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(6) );
  reg_def V22_O ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(7) );

  reg_def V23   ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()          );
  reg_def V23_H ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next()  );
  reg_def V23_J ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2) );
  reg_def V23_K ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3) );
  reg_def V23_L ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(4) );
  reg_def V23_M ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(5) );
  reg_def V23_N ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(6) );
  reg_def V23_O ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(7) );

  reg_def V24   ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()          );
  reg_def V24_H ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next()  );
  reg_def V24_J ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2) );
  reg_def V24_K ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3) );
  reg_def V24_L ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(4) );
  reg_def V24_M ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(5) );
  reg_def V24_N ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(6) );
  reg_def V24_O ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(7) );

  reg_def V25   ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()          );
  reg_def V25_H ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next()  );
  reg_def V25_J ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2) );
  reg_def V25_K ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3) );
  reg_def V25_L ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(4) );
  reg_def V25_M ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(5) );
  reg_def V25_N ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(6) );
  reg_def V25_O ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(7) );

  reg_def V26   ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()          );
  reg_def V26_H ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next()  );
  reg_def V26_J ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2) );
  reg_def V26_K ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3) );
  reg_def V26_L ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(4) );
  reg_def V26_M ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(5) );
  reg_def V26_N ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(6) );
  reg_def V26_O ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(7) );

  reg_def V27   ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()          );
  reg_def V27_H ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next()  );
  reg_def V27_J ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2) );
  reg_def V27_K ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3) );
  reg_def V27_L ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(4) );
  reg_def V27_M ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(5) );
  reg_def V27_N ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(6) );
  reg_def V27_O ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(7) );

  reg_def V28   ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()          );
  reg_def V28_H ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next()  );
  reg_def V28_J ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2) );
  reg_def V28_K ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3) );
  reg_def V28_L ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(4) );
  reg_def V28_M ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(5) );
  reg_def V28_N ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(6) );
  reg_def V28_O ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(7) );

  reg_def V29   ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()          );
  reg_def V29_H ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next()  );
  reg_def V29_J ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2) );
  reg_def V29_K ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3) );
  reg_def V29_L ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(4) );
  reg_def V29_M ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(5) );
  reg_def V29_N ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(6) );
  reg_def V29_O ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(7) );

  reg_def V30   ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()          );
  reg_def V30_H ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next()  );
  reg_def V30_J ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2) );
  reg_def V30_K ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3) );
  reg_def V30_L ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(4) );
  reg_def V30_M ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(5) );
  reg_def V30_N ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(6) );
  reg_def V30_O ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(7) );

  reg_def V31   ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()          );
  reg_def V31_H ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next()  );
  reg_def V31_J ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2) );
  reg_def V31_K ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3) );
  reg_def V31_L ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(4) );
  reg_def V31_M ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(5) );
  reg_def V31_N ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(6) );
  reg_def V31_O ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(7) );
  463 
  464 
  465 // ----------------------------
  466 // SVE Predicate Registers
  467 // ----------------------------
  // SVE predicate registers p0..p15: a single ADLC slot each, typed
  // Op_RegVMask, all save-on-call.
  reg_def P0 (SOC, SOC, Op_RegVMask, 0, p0->as_VMReg());
  reg_def P1 (SOC, SOC, Op_RegVMask, 1, p1->as_VMReg());
  reg_def P2 (SOC, SOC, Op_RegVMask, 2, p2->as_VMReg());
  reg_def P3 (SOC, SOC, Op_RegVMask, 3, p3->as_VMReg());
  reg_def P4 (SOC, SOC, Op_RegVMask, 4, p4->as_VMReg());
  reg_def P5 (SOC, SOC, Op_RegVMask, 5, p5->as_VMReg());
  reg_def P6 (SOC, SOC, Op_RegVMask, 6, p6->as_VMReg());
  reg_def P7 (SOC, SOC, Op_RegVMask, 7, p7->as_VMReg());
  reg_def P8 (SOC, SOC, Op_RegVMask, 8, p8->as_VMReg());
  reg_def P9 (SOC, SOC, Op_RegVMask, 9, p9->as_VMReg());
  reg_def P10 (SOC, SOC, Op_RegVMask, 10, p10->as_VMReg());
  reg_def P11 (SOC, SOC, Op_RegVMask, 11, p11->as_VMReg());
  reg_def P12 (SOC, SOC, Op_RegVMask, 12, p12->as_VMReg());
  reg_def P13 (SOC, SOC, Op_RegVMask, 13, p13->as_VMReg());
  reg_def P14 (SOC, SOC, Op_RegVMask, 14, p14->as_VMReg());
  reg_def P15 (SOC, SOC, Op_RegVMask, 15, p15->as_VMReg());
  484 
  485 // ----------------------------
  486 // Special Registers
  487 // ----------------------------
  488 
// The AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. The FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
  494 
// Condition-flags register. Since it is not addressable as an
// instruction operand (see note above) it has no backing VMReg
// (VMRegImpl::Bad()).
reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
  496 
  497 // Specify priority of register selection within phases of register
  498 // allocation.  Highest priority is first.  A useful heuristic is to
  499 // give registers a low priority when they are required by machine
  500 // instructions, like EAX and EDX on I486, and choose no-save registers
  501 // before save-on-call, & save-on-call before save-on-entry.  Registers
  502 // which participate in fixed calling sequences should come last.
  503 // Registers which are used as pairs must fall on an even boundary.
  504 
// Allocation order for the integer registers, highest priority first
// (see the priority notes above): freely-usable volatiles, then the
// argument registers, then the callee-saved (SOE) registers, with the
// dedicated/non-allocatable registers last.
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
    R8, R8_H,   // rscratch1
    R9, R9_H,   // rscratch2
);
  547 
// FP/SIMD register allocation order; each v-register occupies eight
// allocator slots (base plus _H.._O) to cover the SVE-sized view.
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K, V16_L, V16_M, V16_N, V16_O,
    V17, V17_H, V17_J, V17_K, V17_L, V17_M, V17_N, V17_O,
    V18, V18_H, V18_J, V18_K, V18_L, V18_M, V18_N, V18_O,
    V19, V19_H, V19_J, V19_K, V19_L, V19_M, V19_N, V19_O,
    V20, V20_H, V20_J, V20_K, V20_L, V20_M, V20_N, V20_O,
    V21, V21_H, V21_J, V21_K, V21_L, V21_M, V21_N, V21_O,
    V22, V22_H, V22_J, V22_K, V22_L, V22_M, V22_N, V22_O,
    V23, V23_H, V23_J, V23_K, V23_L, V23_M, V23_N, V23_O,
    V24, V24_H, V24_J, V24_K, V24_L, V24_M, V24_N, V24_O,
    V25, V25_H, V25_J, V25_K, V25_L, V25_M, V25_N, V25_O,
    V26, V26_H, V26_J, V26_K, V26_L, V26_M, V26_N, V26_O,
    V27, V27_H, V27_J, V27_K, V27_L, V27_M, V27_N, V27_O,
    V28, V28_H, V28_J, V28_K, V28_L, V28_M, V28_N, V28_O,
    V29, V29_H, V29_J, V29_K, V29_L, V29_M, V29_N, V29_O,
    V30, V30_H, V30_J, V30_K, V30_L, V30_M, V30_N, V30_O,
    V31, V31_H, V31_J, V31_K, V31_L, V31_M, V31_N, V31_O,

    // arg registers
    V0, V0_H, V0_J, V0_K, V0_L, V0_M, V0_N, V0_O,
    V1, V1_H, V1_J, V1_K, V1_L, V1_M, V1_N, V1_O,
    V2, V2_H, V2_J, V2_K, V2_L, V2_M, V2_N, V2_O,
    V3, V3_H, V3_J, V3_K, V3_L, V3_M, V3_N, V3_O,
    V4, V4_H, V4_J, V4_K, V4_L, V4_M, V4_N, V4_O,
    V5, V5_H, V5_J, V5_K, V5_L, V5_M, V5_N, V5_O,
    V6, V6_H, V6_J, V6_K, V6_L, V6_M, V6_N, V6_O,
    V7, V7_H, V7_J, V7_K, V7_L, V7_M, V7_N, V7_O,

    // non-volatiles
    V8, V8_H, V8_J, V8_K, V8_L, V8_M, V8_N, V8_O,
    V9, V9_H, V9_J, V9_K, V9_L, V9_M, V9_N, V9_O,
    V10, V10_H, V10_J, V10_K, V10_L, V10_M, V10_N, V10_O,
    V11, V11_H, V11_J, V11_K, V11_L, V11_M, V11_N, V11_O,
    V12, V12_H, V12_J, V12_K, V12_L, V12_M, V12_N, V12_O,
    V13, V13_H, V13_J, V13_K, V13_L, V13_M, V13_N, V13_O,
    V14, V14_H, V14_J, V14_K, V14_L, V14_M, V14_N, V14_O,
    V15, V15_H, V15_J, V15_K, V15_L, V15_M, V15_N, V15_O,
);
  588 
// SVE predicate register allocation order.
alloc_class chunk2 (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    P7,

    P8,
    P9,
    P10,
    P11,
    P12,
    P13,
    P14,
    P15,
);
  608 
  609 alloc_class chunk3(RFLAGS);
  610 
  611 //----------Architecture Description Register Classes--------------------------
  612 // Several register classes are automatically defined based upon information in
  613 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
  618 //
  619 
// Class for all 32 bit general purpose registers
// (R8 and R9 -- rscratch1/rscratch2, see chunk0 -- are omitted)
reg_class all_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30,
    R31
);
  653 
  654 
// Class for all 32 bit integer registers (excluding SP which
// will never be used as an integer register); the mask is computed
// at startup by reg_mask_init() in the source block.
reg_class any_reg32 %{
  return _ANY_REG32_mask;
%}
  660 
// Singleton classes pin an instruction operand to one specific register.
// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);

// Singleton class for R31 int register
reg_class int_r31_reg(R31);
  675 
// Class for all 64 bit general purpose registers
// (R8 and R9 -- rscratch1/rscratch2, see chunk0 -- are omitted)
reg_class all_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
  709 
// Class for all long integer registers (including SP); mask is
// initialized in reg_mask_init().
reg_class any_reg %{
  return _ANY_REG_mask;
%}
  714 
// These two classes are subtracted from the NO_SPECIAL masks in
// reg_mask_init() so the allocator never hands out these registers.
// Class for non-allocatable 32 bit registers
reg_class non_allocatable_reg32(
    R28,                        // thread
    R30,                        // lr
    R31                         // sp
);

// Class for non-allocatable 64 bit registers
reg_class non_allocatable_reg(
    R28, R28_H,                 // thread
    R30, R30_H,                 // lr
    R31, R31_H                  // sp
);
  728 
// Class for all non-special integer registers: excludes thread/lr/sp and,
// conditionally, heapbase (r27) and fp (r29) -- see reg_mask_init().
reg_class no_special_reg32 %{
  return _NO_SPECIAL_REG32_mask;
%}

// Class for all non-special long integer registers
reg_class no_special_reg %{
  return _NO_SPECIAL_REG_mask;
%}
  738 
// Singleton 64-bit classes, used to pin operands to specific registers.
// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register
reg_class fp_reg(
    R29, R29_H
);

// Class for link register
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register
reg_class sp_reg(
  R31, R31_H
);
  808 
// Class for all pointer registers (mask set up in reg_mask_init())
reg_class ptr_reg %{
  return _PTR_REG_mask;
%}

// Class for all non_special pointer registers (same, minus the
// non-allocatable and conditionally-reserved registers)
reg_class no_special_ptr_reg %{
  return _NO_SPECIAL_PTR_REG_mask;
%}
  818 
// Class for all float registers
// (a single allocator slot per register; cf. double_reg below)
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
  854 
// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
  892 
// Class for all SVE vector registers.
// (all eight allocator slots per v-register: base plus _H.._O)
reg_class vectora_reg (
    V0, V0_H, V0_J, V0_K, V0_L, V0_M, V0_N, V0_O,
    V1, V1_H, V1_J, V1_K, V1_L, V1_M, V1_N, V1_O,
    V2, V2_H, V2_J, V2_K, V2_L, V2_M, V2_N, V2_O,
    V3, V3_H, V3_J, V3_K, V3_L, V3_M, V3_N, V3_O,
    V4, V4_H, V4_J, V4_K, V4_L, V4_M, V4_N, V4_O,
    V5, V5_H, V5_J, V5_K, V5_L, V5_M, V5_N, V5_O,
    V6, V6_H, V6_J, V6_K, V6_L, V6_M, V6_N, V6_O,
    V7, V7_H, V7_J, V7_K, V7_L, V7_M, V7_N, V7_O,
    V8, V8_H, V8_J, V8_K, V8_L, V8_M, V8_N, V8_O,
    V9, V9_H, V9_J, V9_K, V9_L, V9_M, V9_N, V9_O,
    V10, V10_H, V10_J, V10_K, V10_L, V10_M, V10_N, V10_O,
    V11, V11_H, V11_J, V11_K, V11_L, V11_M, V11_N, V11_O,
    V12, V12_H, V12_J, V12_K, V12_L, V12_M, V12_N, V12_O,
    V13, V13_H, V13_J, V13_K, V13_L, V13_M, V13_N, V13_O,
    V14, V14_H, V14_J, V14_K, V14_L, V14_M, V14_N, V14_O,
    V15, V15_H, V15_J, V15_K, V15_L, V15_M, V15_N, V15_O,
    V16, V16_H, V16_J, V16_K, V16_L, V16_M, V16_N, V16_O,
    V17, V17_H, V17_J, V17_K, V17_L, V17_M, V17_N, V17_O,
    V18, V18_H, V18_J, V18_K, V18_L, V18_M, V18_N, V18_O,
    V19, V19_H, V19_J, V19_K, V19_L, V19_M, V19_N, V19_O,
    V20, V20_H, V20_J, V20_K, V20_L, V20_M, V20_N, V20_O,
    V21, V21_H, V21_J, V21_K, V21_L, V21_M, V21_N, V21_O,
    V22, V22_H, V22_J, V22_K, V22_L, V22_M, V22_N, V22_O,
    V23, V23_H, V23_J, V23_K, V23_L, V23_M, V23_N, V23_O,
    V24, V24_H, V24_J, V24_K, V24_L, V24_M, V24_N, V24_O,
    V25, V25_H, V25_J, V25_K, V25_L, V25_M, V25_N, V25_O,
    V26, V26_H, V26_J, V26_K, V26_L, V26_M, V26_N, V26_O,
    V27, V27_H, V27_J, V27_K, V27_L, V27_M, V27_N, V27_O,
    V28, V28_H, V28_J, V28_K, V28_L, V28_M, V28_N, V28_O,
    V29, V29_H, V29_J, V29_K, V29_L, V29_M, V29_N, V29_O,
    V30, V30_H, V30_J, V30_K, V30_L, V30_M, V30_N, V30_O,
    V31, V31_H, V31_J, V31_K, V31_L, V31_M, V31_N, V31_O,
);
  928 
// Class for all 64bit vector registers
// (two allocator slots: base plus _H)
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
  964 
// Class for all 128bit vector registers
// (four allocator slots: base plus _H, _J, _K)
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 1000 
// Singleton vector register classes, one per v-register, used to pin
// vector operands to specific registers.
// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Class for 128 bit register v4
reg_class v4_reg(
    V4, V4_H
);

// Class for 128 bit register v5
reg_class v5_reg(
    V5, V5_H
);

// Class for 128 bit register v6
reg_class v6_reg(
    V6, V6_H
);

// Class for 128 bit register v7
reg_class v7_reg(
    V7, V7_H
);

// Class for 128 bit register v8
reg_class v8_reg(
    V8, V8_H
);

// Class for 128 bit register v9
reg_class v9_reg(
    V9, V9_H
);

// Class for 128 bit register v10
reg_class v10_reg(
    V10, V10_H
);

// Class for 128 bit register v11
reg_class v11_reg(
    V11, V11_H
);

// Class for 128 bit register v12
reg_class v12_reg(
    V12, V12_H
);

// Class for 128 bit register v13
reg_class v13_reg(
    V13, V13_H
);

// Class for 128 bit register v14
reg_class v14_reg(
    V14, V14_H
);

// Class for 128 bit register v15
reg_class v15_reg(
    V15, V15_H
);

// Class for 128 bit register v16
reg_class v16_reg(
    V16, V16_H
);

// Class for 128 bit register v17
reg_class v17_reg(
    V17, V17_H
);

// Class for 128 bit register v18
reg_class v18_reg(
    V18, V18_H
);

// Class for 128 bit register v19
reg_class v19_reg(
    V19, V19_H
);

// Class for 128 bit register v20
reg_class v20_reg(
    V20, V20_H
);

// Class for 128 bit register v21
reg_class v21_reg(
    V21, V21_H
);

// Class for 128 bit register v22
reg_class v22_reg(
    V22, V22_H
);

// Class for 128 bit register v23
reg_class v23_reg(
    V23, V23_H
);

// Class for 128 bit register v24
reg_class v24_reg(
    V24, V24_H
);

// Class for 128 bit register v25
reg_class v25_reg(
    V25, V25_H
);

// Class for 128 bit register v26
reg_class v26_reg(
    V26, V26_H
);

// Class for 128 bit register v27
reg_class v27_reg(
    V27, V27_H
);

// Class for 128 bit register v28
reg_class v28_reg(
    V28, V28_H
);

// Class for 128 bit register v29
reg_class v29_reg(
    V29, V29_H
);

// Class for 128 bit register v30
reg_class v30_reg(
    V30, V30_H
);

// Class for 128 bit register v31
reg_class v31_reg(
    V31, V31_H
);
 1160 
// Class for all SVE predicate registers.
// (p7 is excluded; see the comment inline)
reg_class pr_reg (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    // P7, non-allocatable, preserved with all elements preset to TRUE.
    P8,
    P9,
    P10,
    P11,
    P12,
    P13,
    P14,
    P15
);
 1180 
// Class for SVE governing predicate registers, which are used
// to determine the active elements of a predicated instruction.
// (only p0-p6 can govern; p7 is reserved, see below)
reg_class gov_pr (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    // P7, non-allocatable, preserved with all elements preset to TRUE.
);
 1193 
// Singleton class for condition codes (the RFLAGS pseudo-register)
reg_class int_flags(RFLAGS);
 1196 
 1197 %}
 1198 
 1199 //----------DEFINITION BLOCK---------------------------------------------------
 1200 // Define name --> value mappings to inform the ADLC of an integer valued name
 1201 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 1202 // Format:
 1203 //        int_def  <name>         ( <int_value>, <expression>);
 1204 // Generated Code in ad_<arch>.hpp
 1205 //        #define  <name>   (<expression>)
 1206 //        // value == <int_value>
 1207 // Generated code in ad_<arch>.cpp adlc_verification()
 1208 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 1209 //
 1210 
 1211 // we follow the ppc-aix port in using a simple cost model which ranks
 1212 // register operations as cheap, memory ops as more expensive and
 1213 // branches as most expensive. the first two have a low as well as a
 1214 // normal cost. huge cost appears to be a way of saying don't do
 1215 // something
 1216 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls cost twice a register move.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile references are an order of magnitude more expensive.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 1224 
 1225 
 1226 //----------SOURCE BLOCK-------------------------------------------------------
 1227 // This is a block of C++ code which provides values, functions, and
 1228 // definitions necessary in the rest of the architecture description
 1229 
 1230 source_hpp %{
 1231 
 1232 #include "asm/macroAssembler.hpp"
 1233 #include "gc/shared/cardTable.hpp"
 1234 #include "gc/shared/cardTableBarrierSet.hpp"
 1235 #include "gc/shared/collectedHeap.hpp"
 1236 #include "opto/addnode.hpp"
 1237 #include "opto/convertnode.hpp"
 1238 
// Masks derived from the adlc-generated register classes; initialized
// at startup by reg_mask_init() in the source block.
extern RegMask _ANY_REG32_mask;
extern RegMask _ANY_REG_mask;
extern RegMask _PTR_REG_mask;
extern RegMask _NO_SPECIAL_REG32_mask;
extern RegMask _NO_SPECIAL_REG_mask;
extern RegMask _NO_SPECIAL_PTR_REG_mask;
 1245 
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  // AArch64 does not use call trampoline stubs, so both queries are zero.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
 1263 
// Sizing and emission hooks for the exception and deopt handler stubs.
class HandlerImpl {

 public:

  // Emit the handler code into cbuf; return the offset of the handler.
  // (implementations are not in this chunk of the file)
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // Worst-case size of the exception handler: one far branch.
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // NOTE(review): reserves 4 instruction slots; confirm this matches
    // adr + far branch (a far branch may expand to several instructions).
    return 4 * NativeInstruction::instruction_size;
  }
};
 1280 
// Platform-dependent node flags: AArch64 adds no flags of its own, so
// _last_flag simply aliases the shared Node::_last_flag.
class Node::PD {
public:
  enum NodeFlags {
    _last_flag = Node::_last_flag
  };
};
 1287 
 // Predicates used by the memory-access instruction rules; definitions
 // appear in the source block.
 bool is_CAS(int opcode, bool maybe_volatile);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);
 1306 %}
 1307 
 1308 source %{
 1309 
 1310   // Derived RegMask with conditionally allocatable registers
 1311 
  // No platform-specific mach-node analysis is needed on AArch64.
  void PhaseOutput::pd_perform_mach_node_analysis() {
  }

  // Machine nodes have no special alignment requirement (1 byte).
  int MachNode::pd_alignment_required() const {
    return 1;
  }

  // No padding is ever inserted before a machine node.
  int MachNode::compute_padding(int current_offset) const {
    return 0;
  }
 1322 
  // Definitions of the masks declared extern in source_hpp; populated
  // by reg_mask_init() below.
  RegMask _ANY_REG32_mask;
  RegMask _ANY_REG_mask;
  RegMask _PTR_REG_mask;
  RegMask _NO_SPECIAL_REG32_mask;
  RegMask _NO_SPECIAL_REG_mask;
  RegMask _NO_SPECIAL_PTR_REG_mask;
 1329 
  void reg_mask_init() {
    // We derive below RegMask(s) from the ones which are auto-generated from
    // adlc register classes to make AArch64 rheapbase (r27) and rfp (r29)
    // registers conditionally reserved.

    // Any 32-bit register except sp (r31).
    _ANY_REG32_mask = _ALL_REG32_mask;
    _ANY_REG32_mask.Remove(OptoReg::as_OptoReg(r31_sp->as_VMReg()));

    // Any 64-bit register, including sp.
    _ANY_REG_mask = _ALL_REG_mask;

    _PTR_REG_mask = _ALL_REG_mask;

    // NO_SPECIAL masks start from all registers and drop the ones the
    // allocator must never hand out (thread, lr, sp -- see the
    // non_allocatable_reg* classes).
    _NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
    _NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);

    _NO_SPECIAL_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    _NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    // r27 is not allocatable when compressed oops is on and heapbase is not
    // zero, compressed klass pointers doesn't use r27 after JDK-8234794
    if (UseCompressedOops && (CompressedOops::ptrs_base() != NULL || UseAOT)) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_REG_mask.SUBTRACT(_HEAPBASE_REG_mask);
      _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_HEAPBASE_REG_mask);
    }

    // r29 is not allocatable when PreserveFramePointer is on
    if (PreserveFramePointer) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_REG_mask.SUBTRACT(_FP_REG_mask);
      _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_FP_REG_mask);
    }
  }
 1366 
 1367   // Optimizaton of volatile gets and puts
 1368   // -------------------------------------
 1369   //
 1370   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
 1371   // use to implement volatile reads and writes. For a volatile read
 1372   // we simply need
 1373   //
 1374   //   ldar<x>
 1375   //
 1376   // and for a volatile write we need
 1377   //
 1378   //   stlr<x>
 1379   //
 1380   // Alternatively, we can implement them by pairing a normal
 1381   // load/store with a memory barrier. For a volatile read we need
 1382   //
 1383   //   ldr<x>
 1384   //   dmb ishld
 1385   //
 1386   // for a volatile write
 1387   //
 1388   //   dmb ish
 1389   //   str<x>
 1390   //   dmb ish
 1391   //
 1392   // We can also use ldaxr and stlxr to implement compare and swap CAS
 1393   // sequences. These are normally translated to an instruction
 1394   // sequence like the following
 1395   //
 1396   //   dmb      ish
 1397   // retry:
 1398   //   ldxr<x>   rval raddr
 1399   //   cmp       rval rold
 1400   //   b.ne done
 1401   //   stlxr<x>  rval, rnew, rold
 1402   //   cbnz      rval retry
 1403   // done:
 1404   //   cset      r0, eq
 1405   //   dmb ishld
 1406   //
 1407   // Note that the exclusive store is already using an stlxr
 1408   // instruction. That is required to ensure visibility to other
 1409   // threads of the exclusive write (assuming it succeeds) before that
 1410   // of any subsequent writes.
 1411   //
 1412   // The following instruction sequence is an improvement on the above
 1413   //
 1414   // retry:
 1415   //   ldaxr<x>  rval raddr
 1416   //   cmp       rval rold
 1417   //   b.ne done
 1418   //   stlxr<x>  rval, rnew, rold
 1419   //   cbnz      rval retry
 1420   // done:
 1421   //   cset      r0, eq
 1422   //
 1423   // We don't need the leading dmb ish since the stlxr guarantees
 1424   // visibility of prior writes in the case that the swap is
 1425   // successful. Crucially we don't have to worry about the case where
 1426   // the swap is not successful since no valid program should be
 1427   // relying on visibility of prior changes by the attempting thread
 1428   // in the case where the CAS fails.
 1429   //
 1430   // Similarly, we don't need the trailing dmb ishld if we substitute
 1431   // an ldaxr instruction since that will provide all the guarantees we
 1432   // require regarding observation of changes made by other threads
 1433   // before any change to the CAS address observed by the load.
 1434   //
  // In order to generate the desired instruction sequence we need to
  // be able to identify specific 'signature' ideal graph node
  // sequences which i) occur as a translation of volatile reads,
  // writes or CAS operations and ii) do not occur through any other
  // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
  // sequences to the desired machine code sequences. Selection of the
  // alternative rules can be implemented by predicates which identify
  // the relevant node sequences.
 1444   //
 1445   // The ideal graph generator translates a volatile read to the node
 1446   // sequence
 1447   //
 1448   //   LoadX[mo_acquire]
 1449   //   MemBarAcquire
 1450   //
 1451   // As a special case when using the compressed oops optimization we
 1452   // may also see this variant
 1453   //
 1454   //   LoadN[mo_acquire]
 1455   //   DecodeN
 1456   //   MemBarAcquire
 1457   //
 1458   // A volatile write is translated to the node sequence
 1459   //
 1460   //   MemBarRelease
 1461   //   StoreX[mo_release] {CardMark}-optional
 1462   //   MemBarVolatile
 1463   //
 1464   // n.b. the above node patterns are generated with a strict
 1465   // 'signature' configuration of input and output dependencies (see
 1466   // the predicates below for exact details). The card mark may be as
 1467   // simple as a few extra nodes or, in a few GC configurations, may
 1468   // include more complex control flow between the leading and
 1469   // trailing memory barriers. However, whatever the card mark
 1470   // configuration these signatures are unique to translated volatile
 1471   // reads/stores -- they will not appear as a result of any other
 1472   // bytecode translation or inlining nor as a consequence of
 1473   // optimizing transforms.
 1474   //
 1475   // We also want to catch inlined unsafe volatile gets and puts and
 1476   // be able to implement them using either ldar<x>/stlr<x> or some
 1477   // combination of ldr<x>/stlr<x> and dmb instructions.
 1478   //
 1479   // Inlined unsafe volatiles puts manifest as a minor variant of the
 1480   // normal volatile put node sequence containing an extra cpuorder
 1481   // membar
 1482   //
 1483   //   MemBarRelease
 1484   //   MemBarCPUOrder
 1485   //   StoreX[mo_release] {CardMark}-optional
 1486   //   MemBarCPUOrder
 1487   //   MemBarVolatile
 1488   //
 1489   // n.b. as an aside, a cpuorder membar is not itself subject to
 1490   // matching and translation by adlc rules.  However, the rule
 1491   // predicates need to detect its presence in order to correctly
 1492   // select the desired adlc rules.
 1493   //
  // Inlined unsafe volatile gets manifest as a slightly different
  // node sequence to a normal volatile get because of the
  // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
  // MemBarAcquire, possibly through an optional DecodeN, is still
  // present
 1500   //
 1501   //   MemBarCPUOrder
 1502   //        ||       \\
 1503   //   MemBarCPUOrder LoadX[mo_acquire]
 1504   //        ||            |
 1505   //        ||       {DecodeN} optional
 1506   //        ||       /
 1507   //     MemBarAcquire
 1508   //
 1509   // In this case the acquire membar does not directly depend on the
 1510   // load. However, we can be sure that the load is generated from an
 1511   // inlined unsafe volatile get if we see it dependent on this unique
 1512   // sequence of membar nodes. Similarly, given an acquire membar we
 1513   // can know that it was added because of an inlined unsafe volatile
 1514   // get if it is fed and feeds a cpuorder membar and if its feed
 1515   // membar also feeds an acquiring load.
 1516   //
 1517   // Finally an inlined (Unsafe) CAS operation is translated to the
 1518   // following ideal graph
 1519   //
 1520   //   MemBarRelease
 1521   //   MemBarCPUOrder
 1522   //   CompareAndSwapX {CardMark}-optional
 1523   //   MemBarCPUOrder
 1524   //   MemBarAcquire
 1525   //
 1526   // So, where we can identify these volatile read and write
 1527   // signatures we can choose to plant either of the above two code
 1528   // sequences. For a volatile read we can simply plant a normal
 1529   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
 1530   // also choose to inhibit translation of the MemBarAcquire and
 1531   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
 1532   //
  // When we recognise a volatile store signature we can choose to
  // plant a dmb ish as a translation for the MemBarRelease, a
  // normal str<x> and then a dmb ish for the MemBarVolatile.
 1536   // Alternatively, we can inhibit translation of the MemBarRelease
 1537   // and MemBarVolatile and instead plant a simple stlr<x>
 1538   // instruction.
 1539   //
 1540   // when we recognise a CAS signature we can choose to plant a dmb
 1541   // ish as a translation for the MemBarRelease, the conventional
 1542   // macro-instruction sequence for the CompareAndSwap node (which
 1543   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
 1544   // Alternatively, we can elide generation of the dmb instructions
 1545   // and plant the alternative CompareAndSwap macro-instruction
 1546   // sequence (which uses ldaxr<x>).
 1547   //
 1548   // Of course, the above only applies when we see these signature
 1549   // configurations. We still want to plant dmb instructions in any
 1550   // other cases where we may see a MemBarAcquire, MemBarRelease or
 1551   // MemBarVolatile. For example, at the end of a constructor which
 1552   // writes final/volatile fields we will see a MemBarRelease
 1553   // instruction and this needs a 'dmb ish' lest we risk the
 1554   // constructed object being visible without making the
 1555   // final/volatile field writes visible.
 1556   //
 1557   // n.b. the translation rules below which rely on detection of the
 1558   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
 1559   // If we see anything other than the signature configurations we
 1560   // always just translate the loads and stores to ldr<x> and str<x>
 1561   // and translate acquire, release and volatile membars to the
 1562   // relevant dmb instructions.
 1563   //
 1564 
 1565   // is_CAS(int opcode, bool maybe_volatile)
 1566   //
 1567   // return true if opcode is one of the possible CompareAndSwapX
 1568   // values otherwise false.
 1569 
 1570   bool is_CAS(int opcode, bool maybe_volatile)
 1571   {
 1572     switch(opcode) {
 1573       // We handle these
 1574     case Op_CompareAndSwapI:
 1575     case Op_CompareAndSwapL:
 1576     case Op_CompareAndSwapP:
 1577     case Op_CompareAndSwapN:
 1578     case Op_ShenandoahCompareAndSwapP:
 1579     case Op_ShenandoahCompareAndSwapN:
 1580     case Op_CompareAndSwapB:
 1581     case Op_CompareAndSwapS:
 1582     case Op_GetAndSetI:
 1583     case Op_GetAndSetL:
 1584     case Op_GetAndSetP:
 1585     case Op_GetAndSetN:
 1586     case Op_GetAndAddI:
 1587     case Op_GetAndAddL:
 1588       return true;
 1589     case Op_CompareAndExchangeI:
 1590     case Op_CompareAndExchangeN:
 1591     case Op_CompareAndExchangeB:
 1592     case Op_CompareAndExchangeS:
 1593     case Op_CompareAndExchangeL:
 1594     case Op_CompareAndExchangeP:
 1595     case Op_WeakCompareAndSwapB:
 1596     case Op_WeakCompareAndSwapS:
 1597     case Op_WeakCompareAndSwapI:
 1598     case Op_WeakCompareAndSwapL:
 1599     case Op_WeakCompareAndSwapP:
 1600     case Op_WeakCompareAndSwapN:
 1601     case Op_ShenandoahWeakCompareAndSwapP:
 1602     case Op_ShenandoahWeakCompareAndSwapN:
 1603     case Op_ShenandoahCompareAndExchangeP:
 1604     case Op_ShenandoahCompareAndExchangeN:
 1605       return maybe_volatile;
 1606     default:
 1607       return false;
 1608     }
 1609   }
 1610 
 1611   // helper to determine the maximum number of Phi nodes we may need to
 1612   // traverse when searching from a card mark membar for the merge mem
 1613   // feeding a trailing membar or vice versa
 1614 
 1615 // predicates controlling emit of ldr<x>/ldar<x>
 1616 
 1617 bool unnecessary_acquire(const Node *barrier)
 1618 {
 1619   assert(barrier->is_MemBar(), "expecting a membar");
 1620 
 1621   MemBarNode* mb = barrier->as_MemBar();
 1622 
 1623   if (mb->trailing_load()) {
 1624     return true;
 1625   }
 1626 
 1627   if (mb->trailing_load_store()) {
 1628     Node* load_store = mb->in(MemBarNode::Precedent);
 1629     assert(load_store->is_LoadStore(), "unexpected graph shape");
 1630     return is_CAS(load_store->Opcode(), true);
 1631   }
 1632 
 1633   return false;
 1634 }
 1635 
 1636 bool needs_acquiring_load(const Node *n)
 1637 {
 1638   assert(n->is_Load(), "expecting a load");
 1639   LoadNode *ld = n->as_Load();
 1640   return ld->is_acquire();
 1641 }
 1642 
 1643 bool unnecessary_release(const Node *n)
 1644 {
 1645   assert((n->is_MemBar() &&
 1646           n->Opcode() == Op_MemBarRelease),
 1647          "expecting a release membar");
 1648 
 1649   MemBarNode *barrier = n->as_MemBar();
 1650   if (!barrier->leading()) {
 1651     return false;
 1652   } else {
 1653     Node* trailing = barrier->trailing_membar();
 1654     MemBarNode* trailing_mb = trailing->as_MemBar();
 1655     assert(trailing_mb->trailing(), "Not a trailing membar?");
 1656     assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
 1657 
 1658     Node* mem = trailing_mb->in(MemBarNode::Precedent);
 1659     if (mem->is_Store()) {
 1660       assert(mem->as_Store()->is_release(), "");
 1661       assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
 1662       return true;
 1663     } else {
 1664       assert(mem->is_LoadStore(), "");
 1665       assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
 1666       return is_CAS(mem->Opcode(), true);
 1667     }
 1668   }
 1669   return false;
 1670 }
 1671 
 1672 bool unnecessary_volatile(const Node *n)
 1673 {
 1674   // assert n->is_MemBar();
 1675   MemBarNode *mbvol = n->as_MemBar();
 1676 
 1677   bool release = mbvol->trailing_store();
 1678   assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
 1679 #ifdef ASSERT
 1680   if (release) {
 1681     Node* leading = mbvol->leading_membar();
 1682     assert(leading->Opcode() == Op_MemBarRelease, "");
 1683     assert(leading->as_MemBar()->leading_store(), "");
 1684     assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
 1685   }
 1686 #endif
 1687 
 1688   return release;
 1689 }
 1690 
 1691 // predicates controlling emit of str<x>/stlr<x>
 1692 
 1693 bool needs_releasing_store(const Node *n)
 1694 {
 1695   // assert n->is_Store();
 1696   StoreNode *st = n->as_Store();
 1697   return st->trailing_membar() != NULL;
 1698 }
 1699 
 1700 // predicate controlling translation of CAS
 1701 //
 1702 // returns true if CAS needs to use an acquiring load otherwise false
 1703 
 1704 bool needs_acquiring_load_exclusive(const Node *n)
 1705 {
 1706   assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
 1707   LoadStoreNode* ldst = n->as_LoadStore();
 1708   if (is_CAS(n->Opcode(), false)) {
 1709     assert(ldst->trailing_membar() != NULL, "expected trailing membar");
 1710   } else {
 1711     return ldst->trailing_membar() != NULL;
 1712   }
 1713 
 1714   // so we can just return true here
 1715   return true;
 1716 }
 1717 
 1718 #define __ _masm.
 1719 
 1720 // advance declarations for helper functions to convert register
 1721 // indices to register objects
 1722 
 1723 // the ad file has to provide implementations of certain methods
 1724 // expected by the generic code
 1725 //
 1726 // REQUIRED FUNCTIONALITY
 1727 
 1728 //=============================================================================
 1729 
 1730 // !!!!! Special hack to get all types of calls to specify the byte offset
 1731 //       from the start of the call to the point where the return address
 1732 //       will point.
 1733 
 1734 int MachCallStaticJavaNode::ret_addr_offset()
 1735 {
 1736   // call should be a simple bl
 1737   int off = 4;
 1738   return off;
 1739 }
 1740 
 1741 int MachCallDynamicJavaNode::ret_addr_offset()
 1742 {
 1743   return 16; // movz, movk, movk, bl
 1744 }
 1745 
 1746 int MachCallRuntimeNode::ret_addr_offset() {
 1747   // for generated stubs the call will be
 1748   //   far_call(addr)
 1749   // for real runtime callouts it will be six instructions
 1750   // see aarch64_enc_java_to_runtime
 1751   //   adr(rscratch2, retaddr)
 1752   //   lea(rscratch1, RuntimeAddress(addr)
 1753   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
 1754   //   blr(rscratch1)
 1755   CodeBlob *cb = CodeCache::find_blob(_entry_point);
 1756   if (cb) {
 1757     return MacroAssembler::far_branch_size();
 1758   } else {
 1759     return 6 * NativeInstruction::instruction_size;
 1760   }
 1761 }
 1762 
 1763 // Indicate if the safepoint node needs the polling page as an input
 1764 
 1765 // the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
 1767 // instruction itself. so we cannot plant a mov of the safepoint poll
 1768 // address followed by a load. setting this to true means the mov is
 1769 // scheduled as a prior instruction. that's better for scheduling
 1770 // anyway.
 1771 
// Always true on AArch64: the polling page address is materialized as
// a separate prior instruction so the oop data planted by shared code
// lands on the load itself (see comment above).
bool SafePointNode::needs_polling_address_input()
{
  return true;
}
 1776 
 1777 //=============================================================================
 1778 
 1779 #ifndef PRODUCT
// Debug listing for a breakpoint node.
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
 1783 #endif
 1784 
// Emit a breakpoint as a single brk #0 instruction.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  C2_MacroAssembler _masm(&cbuf);
  __ brk(0);
}
 1789 
// Size in bytes of the emitted breakpoint; computed generically.
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 1793 
 1794 //=============================================================================
 1795 
 1796 #ifndef PRODUCT
  // Debug listing for nop padding.
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
 1800 #endif
 1801 
 1802   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
 1803     C2_MacroAssembler _masm(&cbuf);
 1804     for (int i = 0; i < _count; i++) {
 1805       __ nop();
 1806     }
 1807   }
 1808 
  // _count nops, each one machine instruction wide.
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
 1812 
 1813 //=============================================================================
// MachConstantBaseNode produces no register result on AArch64
// (constants are addressed absolutely; see calculate_table_base_offset).
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
 1815 
// Constant table entries are addressed absolutely, so the base offset
// is always zero.
int ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}
 1819 
// No post-allocation expansion is needed for the constant base node,
// so postalloc_expand must never be called.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
 1824 
// No code is emitted for the constant base node (absolute addressing).
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}
 1828 
// Zero bytes: matches the empty encoding in emit() above.
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}
 1832 
 1833 #ifndef PRODUCT
// Debug listing: the node emits nothing.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
 1837 #endif
 1838 
 1839 #ifndef PRODUCT
// Debug listing of the method prolog: stack bang, frame build (short
// or long form depending on frame size) and, for compiled methods,
// the nmethod entry barrier check.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  if (C->output()->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  if (framesize < ((1 << 9) + 2 * wordSize)) {
    // small frame: sub fits in an immediate; stp saves fp/lr at the top
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    // large frame: push fp/lr first, then drop sp via rscratch1
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
  if (C->stub_function() == NULL && BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) {
    // nmethod entry barrier: compare an embedded guard value against
    // the thread's disarmed value, calling the barrier stub on mismatch
    st->print("\n\t");
    st->print("ldr  rscratch1, [guard]\n\t");
    st->print("dmb ishld\n\t");
    st->print("ldr  rscratch2, [rthread, #thread_disarmed_offset]\n\t");
    st->print("cmp  rscratch1, rscratch2\n\t");
    st->print("b.eq skip");
    st->print("\n\t");
    st->print("blr #nmethod_entry_barrier_stub\n\t");
    st->print("b skip\n\t");
    st->print("guard: int\n\t");
    st->print("\n\t");
    st->print("skip:\n\t");
  }
}
 1873 #endif
 1874 
// Emit the method prolog: patchable nop, optional clinit barrier,
// optional SVE ptrue reinit, stack-bang, frame build, nmethod entry
// barrier, then record frame completion. Order is significant.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const int framesize = C->output()->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  if (C->clinit_barrier_on_entry()) {
    assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");

    Label L_skip_barrier;

    // check the holder class via clinit_barrier; on failure jump to
    // the handle-wrong-method stub
    __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
    __ bind(L_skip_barrier);
  }

  if (UseSVE > 0 && C->max_vector_size() >= 16) {
    // re-establish the ptrue predicate register for SVE code
    __ reinitialize_ptrue();
  }

  int bangsize = C->output()->bang_size_in_bytes();
  if (C->output()->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (C->stub_function() == NULL) {
    // compiled methods (not stubs) get an nmethod entry barrier
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->nmethod_entry_barrier(&_masm);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->output()->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    ConstantTable& constant_table = C->output()->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
 1926 
// Prolog size varies with frame size and enabled barriers, so compute
// it generically.
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}
 1932 
// The prolog contains no relocatable values.
int MachPrologNode::reloc() const
{
  return 0;
}
 1937 
 1938 //=============================================================================
 1939 
 1940 #ifndef PRODUCT
// Debug listing of the method epilog: frame pop (three forms by frame
// size) and, for compiled methods, the return polling-page touch.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    // only fp/lr were pushed
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    // frame size fits the ldp/add immediates
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    // large frame: restore sp via rscratch1, then pop fp/lr
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("ldr rscratch1, [rthread],#polling_page_offset\n\t");
    st->print("ldr zr, [rscratch1]");
  }
}
 1964 #endif
 1965 
// Emit the method epilog: tear down the frame, run the reserved-stack
// check when enabled, then touch the return polling page.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    // safepoint poll on method return
    __ fetch_and_read_polling_page(rscratch1, relocInfo::poll_return_type);
  }
}
 1981 
// Epilog size varies with frame size and polling; compute generically.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}
 1986 
int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}
 1991 
// Use the generic pipeline class for the epilog.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
 1995 
 1996 //=============================================================================
 1997 
// Figure out which register class each belongs in: rc_int, rc_float,
// rc_predicate (SVE) or rc_stack.
enum RC { rc_bad, rc_int, rc_float, rc_predicate, rc_stack };
 2001 
 2002 static enum RC rc_class(OptoReg::Name reg) {
 2003 
 2004   if (reg == OptoReg::Bad) {
 2005     return rc_bad;
 2006   }
 2007 
 2008   // we have 32 int registers * 2 halves
 2009   int slots_of_int_registers = RegisterImpl::max_slots_per_register * RegisterImpl::number_of_registers;
 2010 
 2011   if (reg < slots_of_int_registers) {
 2012     return rc_int;
 2013   }
 2014 
 2015   // we have 32 float register * 8 halves
 2016   int slots_of_float_registers = FloatRegisterImpl::max_slots_per_register * FloatRegisterImpl::number_of_registers;
 2017   if (reg < slots_of_int_registers + slots_of_float_registers) {
 2018     return rc_float;
 2019   }
 2020 
 2021   int slots_of_predicate_registers = PRegisterImpl::max_slots_per_register * PRegisterImpl::number_of_registers;
 2022   if (reg < slots_of_int_registers + slots_of_float_registers + slots_of_predicate_registers) {
 2023     return rc_predicate;
 2024   }
 2025 
 2026   // Between predicate regs & stack is the flags.
 2027   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
 2028 
 2029   return rc_stack;
 2030 }
 2031 
 2032 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
 2033   Compile* C = ra_->C;
 2034 
 2035   // Get registers to move.
 2036   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
 2037   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
 2038   OptoReg::Name dst_hi = ra_->get_reg_second(this);
 2039   OptoReg::Name dst_lo = ra_->get_reg_first(this);
 2040 
 2041   enum RC src_hi_rc = rc_class(src_hi);
 2042   enum RC src_lo_rc = rc_class(src_lo);
 2043   enum RC dst_hi_rc = rc_class(dst_hi);
 2044   enum RC dst_lo_rc = rc_class(dst_lo);
 2045 
 2046   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
 2047 
 2048   if (src_hi != OptoReg::Bad) {
 2049     assert((src_lo&1)==0 && src_lo+1==src_hi &&
 2050            (dst_lo&1)==0 && dst_lo+1==dst_hi,
 2051            "expected aligned-adjacent pairs");
 2052   }
 2053 
 2054   if (src_lo == dst_lo && src_hi == dst_hi) {
 2055     return 0;            // Self copy, no move.
 2056   }
 2057 
 2058   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
 2059               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
 2060   int src_offset = ra_->reg2offset(src_lo);
 2061   int dst_offset = ra_->reg2offset(dst_lo);
 2062 
 2063   if (bottom_type()->isa_vect() != NULL) {
 2064     uint ireg = ideal_reg();
 2065     if (ireg == Op_VecA && cbuf) {
 2066       C2_MacroAssembler _masm(cbuf);
 2067       int sve_vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
 2068       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
 2069         // stack->stack
 2070         __ spill_copy_sve_vector_stack_to_stack(src_offset, dst_offset,
 2071                                                 sve_vector_reg_size_in_bytes);
 2072       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
 2073         __ spill_sve_vector(as_FloatRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo),
 2074                             sve_vector_reg_size_in_bytes);
 2075       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
 2076         __ unspill_sve_vector(as_FloatRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo),
 2077                               sve_vector_reg_size_in_bytes);
 2078       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
 2079         __ sve_orr(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2080                    as_FloatRegister(Matcher::_regEncode[src_lo]),
 2081                    as_FloatRegister(Matcher::_regEncode[src_lo]));
 2082       } else {
 2083         ShouldNotReachHere();
 2084       }
 2085     } else if (cbuf) {
 2086       assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
 2087       C2_MacroAssembler _masm(cbuf);
 2088       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
 2089       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
 2090         // stack->stack
 2091         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
 2092         if (ireg == Op_VecD) {
 2093           __ unspill(rscratch1, true, src_offset);
 2094           __ spill(rscratch1, true, dst_offset);
 2095         } else {
 2096           __ spill_copy128(src_offset, dst_offset);
 2097         }
 2098       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
 2099         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2100                ireg == Op_VecD ? __ T8B : __ T16B,
 2101                as_FloatRegister(Matcher::_regEncode[src_lo]));
 2102       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
 2103         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
 2104                  ireg == Op_VecD ? __ D : __ Q,
 2105                  ra_->reg2offset(dst_lo));
 2106       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
 2107         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2108                    ireg == Op_VecD ? __ D : __ Q,
 2109                    ra_->reg2offset(src_lo));
 2110       } else {
 2111         ShouldNotReachHere();
 2112       }
 2113     }
 2114   } else if (cbuf) {
 2115     C2_MacroAssembler _masm(cbuf);
 2116     switch (src_lo_rc) {
 2117     case rc_int:
 2118       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
 2119         if (is64) {
 2120             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
 2121                    as_Register(Matcher::_regEncode[src_lo]));
 2122         } else {
 2123             C2_MacroAssembler _masm(cbuf);
 2124             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
 2125                     as_Register(Matcher::_regEncode[src_lo]));
 2126         }
 2127       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
 2128         if (is64) {
 2129             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2130                      as_Register(Matcher::_regEncode[src_lo]));
 2131         } else {
 2132             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2133                      as_Register(Matcher::_regEncode[src_lo]));
 2134         }
 2135       } else {                    // gpr --> stack spill
 2136         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 2137         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
 2138       }
 2139       break;
 2140     case rc_float:
 2141       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
 2142         if (is64) {
 2143             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
 2144                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2145         } else {
 2146             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
 2147                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2148         }
 2149       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
 2150           if (cbuf) {
 2151             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2152                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2153         } else {
 2154             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2155                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 2156         }
 2157       } else {                    // fpr --> stack spill
 2158         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 2159         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
 2160                  is64 ? __ D : __ S, dst_offset);
 2161       }
 2162       break;
 2163     case rc_stack:
 2164       if (dst_lo_rc == rc_int) {  // stack --> gpr load
 2165         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
 2166       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
 2167         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2168                    is64 ? __ D : __ S, src_offset);
 2169       } else {                    // stack --> stack copy
 2170         assert(dst_lo_rc == rc_stack, "spill to bad register class");
 2171         __ unspill(rscratch1, is64, src_offset);
 2172         __ spill(rscratch1, is64, dst_offset);
 2173       }
 2174       break;
 2175     default:
 2176       assert(false, "bad rc_class for spill");
 2177       ShouldNotReachHere();
 2178     }
 2179   }
 2180 
 2181   if (st) {
 2182     st->print("spill ");
 2183     if (src_lo_rc == rc_stack) {
 2184       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
 2185     } else {
 2186       st->print("%s -> ", Matcher::regName[src_lo]);
 2187     }
 2188     if (dst_lo_rc == rc_stack) {
 2189       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
 2190     } else {
 2191       st->print("%s", Matcher::regName[dst_lo]);
 2192     }
 2193     if (bottom_type()->isa_vect() != NULL) {
 2194       int vsize = 0;
 2195       switch (ideal_reg()) {
 2196       case Op_VecD:
 2197         vsize = 64;
 2198         break;
 2199       case Op_VecX:
 2200         vsize = 128;
 2201         break;
 2202       case Op_VecA:
 2203         vsize = Matcher::scalable_vector_reg_size(T_BYTE) * 8;
 2204         break;
 2205       default:
 2206         assert(false, "bad register type for spill");
 2207         ShouldNotReachHere();
 2208       }
 2209       st->print("\t# vector spill size = %d", vsize);
 2210     } else {
 2211       st->print("\t# spill size = %d", is64 ? 64 : 32);
 2212     }
 2213   }
 2214 
 2215   return 0;
 2216 
 2217 }
 2218 
 2219 #ifndef PRODUCT
 2220 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 2221   if (!ra_)
 2222     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
 2223   else
 2224     implementation(NULL, ra_, false, st);
 2225 }
 2226 #endif
 2227 
// Emit the spill copy into the code buffer.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}
 2231 
// Spill copy size depends on the register classes involved; compute
// generically.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 2235 
 2236 //=============================================================================
 2237 
 2238 #ifndef PRODUCT
// Debug listing for a box-lock: computes the lock slot address.
// NOTE(review): the format string prints a stray ']' and names the
// stack pointer 'rsp' while emit() adds to 'sp' — cosmetic-only
// listing text, left unchanged here.
void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("add %s, rsp, #%d]\t# box lock",
            Matcher::regName[reg], offset);
}
 2245 #endif
 2246 
// Materialize the address of the lock slot (sp + offset) into the
// allocated register.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  C2_MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  // This add will handle any 24-bit signed offset. 24 bits allows an
  // 8 megabyte stack frame.
  __ add(as_Register(reg), sp, offset);
}
 2257 
 2258 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
 2259   // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
 2260   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 2261 
 2262   if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
 2263     return NativeInstruction::instruction_size;
 2264   } else {
 2265     return 2 * NativeInstruction::instruction_size;
 2266   }
 2267 }
 2268 
 2269 //=============================================================================
 2270 
 2271 #ifndef PRODUCT
// Debug listing for the unverified entry point: load the receiver
// klass, compare against the inline-cache value and branch to the
// ic-miss stub on mismatch.
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  st->print_cr("# MachUEPNode");
  if (UseCompressedClassPointers) {
    st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
    if (CompressedKlassPointers::shift() != 0) {
      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
    }
  } else {
   st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
  }
  st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
  st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
 2286 #endif
 2287 
// Emit the unverified entry point: inline cache check that jumps to
// the ic-miss stub when the receiver klass does not match.
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  C2_MacroAssembler _masm(&cbuf);

  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
 2301 
// UEP size varies (compressed klass decoding); compute generically.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
 2306 
 2307 // REQUIRED EMIT CODE
 2308 
 2309 //=============================================================================
 2310 
 2311 // Emit exception handler code.
// Emit exception handler code.
// Returns the handler's offset within the stub section, or 0 when the
// code cache is full.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  C2_MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  // far jump into the shared exception blob
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
 2330 
 2331 // Emit deopt handler code.
// Emit deopt handler code.
// Returns the handler's offset within the stub section, or 0 when the
// code cache is full.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  C2_MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // set lr to the address following the adr, then jump to the
  // deopt blob's unpack entry
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
 2351 
 2352 // REQUIRED MATCHER CODE
 2353 
 2354 //=============================================================================
 2355 
 2356 const bool Matcher::match_rule_supported(int opcode) {
 2357   if (!has_match_rule(opcode))
 2358     return false;
 2359 
 2360   bool ret_value = true;
 2361   switch (opcode) {
 2362     case Op_CacheWB:
 2363     case Op_CacheWBPreSync:
 2364     case Op_CacheWBPostSync:
 2365       if (!VM_Version::supports_data_cache_line_flush()) {
 2366         ret_value = false;
 2367       }
 2368       break;
 2369   }
 2370 
 2371   return ret_value; // Per default match rules are supported.
 2372 }
 2373 
 2374 // Identify extra cases that we might want to provide match rules for vector nodes and
 2375 // other intrinsics guarded with vector length (vlen) and element type (bt).
 2376 const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
 2377   if (!match_rule_supported(opcode) || !vector_size_supported(bt, vlen)) {
 2378     return false;
 2379   }
 2380   int bit_size = vlen * type2aelembytes(bt) * 8;
 2381   if (UseSVE == 0 && bit_size > 128) {
 2382     return false;
 2383   }
 2384   if (UseSVE > 0) {
 2385     return op_sve_supported(opcode);
 2386   } else { // NEON
 2387     // Special cases
 2388     switch (opcode) {
 2389     case Op_MulAddVS2VI:
 2390       if (bit_size < 128) {
 2391         return false;
 2392       }
 2393       break;
 2394     case Op_MulVL:
 2395       return false;
 2396     default:
 2397       break;
 2398     }
 2399   }
 2400   return true; // Per default match rules are supported.
 2401 }
 2402 
// Predicated (masked) vector operations are only available with SVE.
const bool Matcher::has_predicated_vectors(void) {
  return UseSVE > 0;
}
 2406 
// FP register pressure threshold for the register allocator; AArch64 uses
// the default unchanged.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}
 2410 
// Map a register number to an x87-style FPU stack offset.  Not applicable
// on AArch64; this must never be called.
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
 2416 
 2417 // Is this branch offset short enough that a short branch can be used?
 2418 //
 2419 // NOTE: If the platform does not provide any short branch variants, then
 2420 //       this method should return false for offset 0.
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
  // The passed offset is relative to address of the branch.
  // Accept anything representable in 16 signed bits; rule and br_size are
  // not consulted on AArch64.

  return (-32768 <= offset && offset < 32768);
}
 2426 
// Is a 64-bit constant cheap enough to materialize directly?  Always true
// on AArch64 regardless of the value.
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}
 2432 
// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
 2437 
// Vector width in bytes.
// Returns 0 when no usable vector width exists for element type bt.
const int Matcher::vector_width_in_bytes(BasicType bt) {
  // The MaxVectorSize should have been set by detecting SVE max vector register size.
  // Cap at 256 bytes with SVE, 16 bytes (one NEON register) otherwise.
  int size = MIN2((UseSVE > 0) ? 256 : 16, (int)MaxVectorSize);
  // Minimum 2 values in vector
  if (size < 2*type2aelembytes(bt)) size = 0;
  // But never < 4
  if (size < 4) size = 0;
  return size;
}
 2448 
// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  // Element count = total vector width / element width.
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
 2453 const int Matcher::min_vector_size(const BasicType bt) {
 2454   int max_size = max_vector_size(bt);
 2455   if ((UseSVE > 0) && (MaxVectorSize >= 16)) {
 2456     // Currently vector length less than SVE vector register size is not supported.
 2457     return max_size;
 2458   } else {
 2459     //  For the moment limit the vector size to 8 bytes with NEON.
 2460     int size = 8 / type2aelembytes(bt);
 2461     if (size < 2) size = 2;
 2462     return size;
 2463   }
 2464 }
 2465 
// Scalable (length-agnostic) vectors are an SVE feature.
const bool Matcher::supports_scalable_vector() {
  return UseSVE > 0;
}
 2469 
// Actual max scalable vector register length.
// Expressed in elements of type bt; identical to the max vector size.
const int Matcher::scalable_vector_reg_size(const BasicType bt) {
  return Matcher::max_vector_size(bt);
}
 2474 
 2475 // Vector ideal reg.
 2476 const uint Matcher::vector_ideal_reg(int len) {
 2477   if (UseSVE > 0 && 16 <= len && len <= 256) {
 2478     return Op_VecA;
 2479   }
 2480   switch(len) {
 2481     case  8: return Op_VecD;
 2482     case 16: return Op_VecX;
 2483   }
 2484   ShouldNotReachHere();
 2485   return 0;
 2486 }
 2487 
// AES support not yet implemented
// false => the AES intrinsics are not handed the original key array.
const bool Matcher::pass_original_key_for_aes() {
  return false;
}
 2492 
// aarch64 supports misaligned vectors store/load.
// No alignment fixups are needed for vector memory accesses.
const bool Matcher::misaligned_vectors_ok() {
  return true;
}
 2497 
// false => size gets scaled to BytesPerLong, ok.
// The array-initialization count operand is in elements, not bytes.
const bool Matcher::init_array_count_is_in_bytes = false;
 2500 
// Use conditional move (CMOVL)
// Extra cost of a long cmove relative to an int cmove: none.
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}
 2506 
// Extra cost of a float cmove relative to an int cmove: none.
const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}
 2511 
// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// false => the hardware only uses the low bits of the shift count.
const bool Matcher::need_masked_shift_count = false;

// No support for generic vector operands.
const bool Matcher::supports_generic_vector_operands  = false;
 2521 
// Specialize a generic vector operand; unreachable on this port, which
// does not use generic vector operands.
MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg, bool is_temp) {
  ShouldNotReachHere(); // generic vector operands not supported
  return NULL;
}
 2526 
// Query for a generic register-to-register move; unreachable on this port.
bool Matcher::is_generic_reg2reg_move(MachNode* m) {
  ShouldNotReachHere();  // generic vector operands not supported
  return false;
}
 2531 
// Query whether an operand is a generic vector; unreachable on this port.
bool Matcher::is_generic_vector(MachOper* opnd)  {
  ShouldNotReachHere();  // generic vector operands not supported
  return false;
}
 2536 
 2537 // This affects two different things:
 2538 //  - how Decode nodes are matched
 2539 //  - how ImplicitNullCheck opportunities are recognized
 2540 // If true, the matcher will try to remove all Decodes and match them
 2541 // (as operands) into nodes. NullChecks are not prepared to deal with
 2542 // Decodes by final_graph_reshaping().
 2543 // If false, final_graph_reshaping() forces the decode behind the Cmp
 2544 // for a NullCheck. The matcher matches the Decode node into a register.
 2545 // Implicit_null_check optimization moves the Decode along with the
 2546 // memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Fold DecodeN into addressing only when compressed oops need no shift.
  return CompressedOops::shift() == 0;
}
 2550 
bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  // Currently DecodeNKlass is never folded into addressing expressions.
  return false;
}
 2556 
bool Matcher::const_oop_prefer_decode() {
  // Prefer ConN+DecodeN over ConP in simple compressed oops mode
  // (i.e. when the compressed-oops base is NULL).
  return CompressedOops::base() == NULL;
}
 2561 
bool Matcher::const_klass_prefer_decode() {
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode
  // (i.e. when the compressed-klass base is NULL).
  return CompressedKlassPointers::base() == NULL;
}
 2566 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
// false => load float constants from memory rather than re-materializing.
const bool Matcher::rematerialize_float_constants = false;
 2573 
// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
// true => AArch64 handles misaligned double accesses directly.
const bool Matcher::misaligned_doubles_ok = true;
 2579 
// Implicit-null-check fixup is not implemented on AArch64 and must never
// be called.  (The previous "No-op on amd64" comment was copied from the
// x86 port and did not describe this body.)
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
 2584 
// Advertise here if the CPU requires explicit rounding operations to implement strictfp mode.
// false => no extra rounding code is emitted for strictfp methods.
const bool Matcher::strict_fp_requires_explicit_rounding = false;
 2587 
// Are floats converted to double when stored to stack during
// deoptimization?
// false => floats keep their 32-bit representation on the stack.
bool Matcher::float_in_double() { return false; }
 2591 
// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
// true => an int occupies a full 64-bit register slot.
const bool Matcher::int_in_long = true;
 2597 
// Return whether or not this register is ever used as an argument.
// This function is used on startup to build the trampoline stubs in
// generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers not be
// available to the callee.
bool Matcher::can_be_java_arg(int reg)
{
  // Java arguments live in r0-r7 (integer/oop) and v0-v7 (FP/vector),
  // including the high half of each register.
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}
 2623 
// Any register that can carry a Java argument may be spilled.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
 2628 
// Should an assembly stub be used for long division by a constant?
// Never on AArch64; the matched instructions are used instead.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
 2632 
// Register for DIVI projection of divmodI.
// Unreachable: fused divmod projections are not used on AArch64.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
 2637 
// Register for MODI projection of divmodI.
// Unreachable: fused divmod projections are not used on AArch64.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
 2643 
// Register for DIVL projection of divmodL.
// Unreachable: fused divmod projections are not used on AArch64.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
 2649 
// Register for MODL projection of divmodL.
// Unreachable: fused divmod projections are not used on AArch64.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
 2655 
// Register mask for the location holding the saved SP across a
// MethodHandle invoke: the frame pointer is used for that purpose.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
 2659 
 2660 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
 2661   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
 2662     Node* u = addp->fast_out(i);
 2663     if (u->is_Mem()) {
 2664       int opsize = u->as_Mem()->memory_size();
 2665       assert(opsize > 0, "unexpected memory operand size");
 2666       if (u->as_Mem()->memory_size() != (1<<shift)) {
 2667         return false;
 2668       }
 2669     }
 2670   }
 2671   return true;
 2672 }
 2673 
// false => the matcher does not require a type node on ConvI2L inputs.
const bool Matcher::convi2l_type_required = false;
 2675 
// Should the matcher clone input 'm' of node 'n'?
bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
  // Clone a vector-shift count constant so shift and count are matched
  // together at each use.
  if (is_vshift_con_pattern(n, m)) { // ShiftV src (ShiftCntV con)
    mstack.push(m, Visit);           // m = ShiftCntV
    return true;
  }
  return false;
}
 2684 
 2685 // Should the Matcher clone shifts on addressing modes, expecting them
 2686 // to be subsumed into complex addressing expressions or compute them
 2687 // into registers?
bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  // Simple base-plus-offset addresses are handled by the shared helper.
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  // Pattern 1: offset is (LShiftL idx con) — a scaled index.  Clone it
  // into the address only if the scale matches the operand size of every
  // memory use (size_fits_all_mem_uses) and it has no other uses.
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    // Also subsume an inner ConvI2L (sign-extended 32-bit index) when it
    // is only used here.
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  // Pattern 2: offset is a bare ConvI2L — an unscaled sign-extended
  // 32-bit index.
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
 2725 
// No platform-specific address reshaping is performed on AArch64.
void Compile::reshape_address(AddPNode* addp) {
}
 2728 
 2729 
// Emit a volatile access INSN of REG at [BASE].  Volatile accesses only
// support a plain base register: index, scale and displacement are all
// rejected by the guarantees below.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  C2_MacroAssembler _masm(&cbuf);                                       \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
 2738 
 2739 
// Build an Address from the components of a C2 memory operand.  The I2L
// addressing opcodes carry a 32-bit index that must be sign-extended
// (sxtw); all other indexed modes use a plain left shift of the index.
static Address mem2address(int opcode, Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
    case INDINDEXI2L:
    case INDINDEXI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // No index register: plain base + displacement.
      return Address(base, disp);
    } else {
      // Indexed modes cannot also carry a displacement.
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      return Address(base, as_Register(index), scale);
    }
  }
 2765 
 2766 
// Member-function-pointer types for the MacroAssembler load/store
// emitters dispatched by the loadStore() helpers below.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_insn2)(Register Rt, Register adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
 2772 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  // size_in_memory is the access width in bytes; it is used to validate
  // and, if needed, legitimize base+offset addresses via rscratch1.
  static void loadStore(C2_MacroAssembler masm, mem_insn insn,
                        Register reg, int opcode,
                        Register base, int index, int scale, int disp,
                        int size_in_memory)
  {
    Address addr = mem2address(opcode, base, index, scale, disp);
    if (addr.getMode() == Address::base_plus_offset) {
      /* If we get an out-of-range offset it is a bug in the compiler,
         so we assert here. */
      assert(Address::offset_ok_for_immed(addr.offset(), exact_log2(size_in_memory)),
             "c2 compiler bug");
      /* Fix up any out-of-range offsets. */
      assert_different_registers(rscratch1, base);
      assert_different_registers(rscratch1, reg);
      addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
    }
    (masm.*insn)(reg, addr);
  }
 2794 
  // FP-register variant of loadStore above; decodes the sign-extended
  // index modes inline rather than via mem2address.
  static void loadStore(C2_MacroAssembler masm, mem_float_insn insn,
                        FloatRegister reg, int opcode,
                        Register base, int index, int size, int disp,
                        int size_in_memory)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      // 32-bit index: sign-extend while scaling.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      /* If we get an out-of-range offset it is a bug in the compiler,
         so we assert here. */
      assert(Address::offset_ok_for_immed(disp, exact_log2(size_in_memory)), "c2 compiler bug");
      /* Fix up any out-of-range offsets. */
      assert_different_registers(rscratch1, base);
      Address addr = Address(base, disp);
      addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
      (masm.*insn)(reg, addr);
    } else {
      // Indexed modes cannot also carry a displacement.
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
 2825 
  // Vector (SIMD) variant of loadStore.
  // NOTE(review): unlike the variants above, no legitimize_address fixup
  // is performed here — base+disp offsets are assumed encodable; confirm
  // against the memory operands used by vector rules.
  static void loadStore(C2_MacroAssembler masm, mem_vector_insn insn,
                        FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                        int opcode, Register base, int index, int size, int disp)
  {
    if (index == -1) {
      (masm.*insn)(reg, T, Address(base, disp));
    } else {
      // Indexed modes cannot also carry a displacement.
      assert(disp == 0, "unsupported address mode");
      (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
 2837 
 2838 %}
 2839 
 2840 
 2841 
 2842 //----------ENCODING BLOCK-----------------------------------------------------
 2843 // This block specifies the encoding classes used by the compiler to
 2844 // output byte streams.  Encoding classes are parameterized macros
 2845 // used by Machine Instruction Nodes in order to generate the bit
 2846 // encoding of the instruction.  Operands specify their base encoding
 2847 // interface with the interface keyword.  There are currently
 2848 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
 2849 // COND_INTER.  REG_INTER causes an operand to generate a function
 2850 // which returns its register number when queried.  CONST_INTER causes
 2851 // an operand to generate a function which returns the value of the
 2852 // constant when queried.  MEMORY_INTER causes an operand to generate
 2853 // four functions which return the Base Register, the Index Register,
 2854 // the Scale Value, and the Offset Value of the operand when queried.
 2855 // COND_INTER causes an operand to generate six functions which return
 2856 // the encoding code (ie - encoding bits for the instruction)
 2857 // associated with each basic boolean condition for a conditional
 2858 // instruction.
 2859 //
 2860 // Instructions specify two basic values for encoding.  Again, a
 2861 // function is available to check if the constant displacement is an
 2862 // oop. They use the ins_encode keyword to specify their encoding
 2863 // classes (which must be a sequence of enc_class names, and their
 2864 // parameters, specified in the encoding block), and they use the
 2865 // opcode keyword to specify, in order, their primary, secondary, and
 2866 // tertiary opcode.  Only the opcode sections which a particular
 2867 // instruction needs for encoding need to be specified.
 2868 encode %{
 2869   // Build emit functions for each basic byte or larger field in the
 2870   // intel encoding scheme (opcode, rm, sib, immediate), and call them
 2871   // from C++ code in the enc_class source block.  Emit functions will
 2872   // live in the main source block for now.  In future, we can
 2873   // generalize this by adding a syntax that specifies the sizes of
 2874   // fields in an order, so that the adlc can build the emit functions
 2875   // automagically
 2876 
  // catch all for unimplemented encodings
  enc_class enc_unimplemented %{
    C2_MacroAssembler _masm(&cbuf);
    // Emits code that stops with an "unimplemented" message if executed.
    __ unimplemented("C2 catch all");
  %}
 2882 
 2883   // BEGIN Non-volatile memory access
 2884 
 2885   // This encoding class is generated automatically from ad_encode.m4.
 2886   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2887   enc_class aarch64_enc_ldrsbw(iRegI dst, memory1 mem) %{
 2888     Register dst_reg = as_Register($dst$$reg);
 2889     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
 2890                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2891   %}
 2892 
 2893   // This encoding class is generated automatically from ad_encode.m4.
 2894   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2895   enc_class aarch64_enc_ldrsb(iRegI dst, memory1 mem) %{
 2896     Register dst_reg = as_Register($dst$$reg);
 2897     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
 2898                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2899   %}
 2900 
 2901   // This encoding class is generated automatically from ad_encode.m4.
 2902   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2903   enc_class aarch64_enc_ldrb(iRegI dst, memory1 mem) %{
 2904     Register dst_reg = as_Register($dst$$reg);
 2905     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
 2906                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2907   %}
 2908 
 2909   // This encoding class is generated automatically from ad_encode.m4.
 2910   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2911   enc_class aarch64_enc_ldrb(iRegL dst, memory1 mem) %{
 2912     Register dst_reg = as_Register($dst$$reg);
 2913     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
 2914                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2915   %}
 2916 
 2917   // This encoding class is generated automatically from ad_encode.m4.
 2918   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2919   enc_class aarch64_enc_ldrshw(iRegI dst, memory2 mem) %{
 2920     Register dst_reg = as_Register($dst$$reg);
 2921     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
 2922                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2923   %}
 2924 
 2925   // This encoding class is generated automatically from ad_encode.m4.
 2926   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2927   enc_class aarch64_enc_ldrsh(iRegI dst, memory2 mem) %{
 2928     Register dst_reg = as_Register($dst$$reg);
 2929     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
 2930                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2931   %}
 2932 
 2933   // This encoding class is generated automatically from ad_encode.m4.
 2934   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2935   enc_class aarch64_enc_ldrh(iRegI dst, memory2 mem) %{
 2936     Register dst_reg = as_Register($dst$$reg);
 2937     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
 2938                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2939   %}
 2940 
 2941   // This encoding class is generated automatically from ad_encode.m4.
 2942   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2943   enc_class aarch64_enc_ldrh(iRegL dst, memory2 mem) %{
 2944     Register dst_reg = as_Register($dst$$reg);
 2945     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
 2946                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2947   %}
 2948 
 2949   // This encoding class is generated automatically from ad_encode.m4.
 2950   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2951   enc_class aarch64_enc_ldrw(iRegI dst, memory4 mem) %{
 2952     Register dst_reg = as_Register($dst$$reg);
 2953     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
 2954                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2955   %}
 2956 
 2957   // This encoding class is generated automatically from ad_encode.m4.
 2958   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2959   enc_class aarch64_enc_ldrw(iRegL dst, memory4 mem) %{
 2960     Register dst_reg = as_Register($dst$$reg);
 2961     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
 2962                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2963   %}
 2964 
 2965   // This encoding class is generated automatically from ad_encode.m4.
 2966   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2967   enc_class aarch64_enc_ldrsw(iRegL dst, memory4 mem) %{
 2968     Register dst_reg = as_Register($dst$$reg);
 2969     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
 2970                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2971   %}
 2972 
 2973   // This encoding class is generated automatically from ad_encode.m4.
 2974   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2975   enc_class aarch64_enc_ldr(iRegL dst, memory8 mem) %{
 2976     Register dst_reg = as_Register($dst$$reg);
 2977     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
 2978                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 2979   %}
 2980 
 2981   // This encoding class is generated automatically from ad_encode.m4.
 2982   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2983   enc_class aarch64_enc_ldrs(vRegF dst, memory4 mem) %{
 2984     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
 2985     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
 2986                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2987   %}
 2988 
 2989   // This encoding class is generated automatically from ad_encode.m4.
 2990   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2991   enc_class aarch64_enc_ldrd(vRegD dst, memory8 mem) %{
 2992     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
 2993     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
 2994                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 2995   %}
 2996 
 2997   // This encoding class is generated automatically from ad_encode.m4.
 2998   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2999   enc_class aarch64_enc_strb(iRegI src, memory1 mem) %{
 3000     Register src_reg = as_Register($src$$reg);
 3001     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
 3002                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 3003   %}
 3004 
 3005   // This encoding class is generated automatically from ad_encode.m4.
 3006   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3007   enc_class aarch64_enc_strb0(memory1 mem) %{
 3008     C2_MacroAssembler _masm(&cbuf);
 3009     loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
 3010                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 3011   %}
 3012 
 3013   // This encoding class is generated automatically from ad_encode.m4.
 3014   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3015   enc_class aarch64_enc_strh(iRegI src, memory2 mem) %{
 3016     Register src_reg = as_Register($src$$reg);
 3017     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
 3018                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 3019   %}
 3020 
 3021   // This encoding class is generated automatically from ad_encode.m4.
 3022   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3023   enc_class aarch64_enc_strh0(memory2 mem) %{
 3024     C2_MacroAssembler _masm(&cbuf);
 3025     loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
 3026                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 3027   %}
 3028 
 3029   // This encoding class is generated automatically from ad_encode.m4.
 3030   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3031   enc_class aarch64_enc_strw(iRegI src, memory4 mem) %{
 3032     Register src_reg = as_Register($src$$reg);
 3033     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
 3034                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 3035   %}
 3036 
 3037   // This encoding class is generated automatically from ad_encode.m4.
 3038   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3039   enc_class aarch64_enc_strw0(memory4 mem) %{
 3040     C2_MacroAssembler _masm(&cbuf);
 3041     loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
 3042                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 3043   %}
 3044 
  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  // (changes below must be made in ad_encode.m4 and regenerated)
  // Store a 64-bit register to memory (str).
  enc_class aarch64_enc_str(iRegL src, memory8 mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      C2_MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      // copy sp into a scratch register that str can encode as a source
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  // Store 64 bits of zero (str zr).
  enc_class aarch64_enc_str0(memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  // Store a 32-bit float register (strs).
  enc_class aarch64_enc_strs(vRegF src, memory4 mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  // Store a 64-bit double register (strd).
  enc_class aarch64_enc_strd(vRegD src, memory8 mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  // Store a narrow-oop constant: materialize the oop, compress it, then
  // store the 32-bit encoding.
  // NOTE(review): declared with memory1 but passes size 4 to loadStore
  // (strw) -- confirm against ad_encode.m4.
  enc_class aarch64_enc_strw_immn(immN src, memory1 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    address con = (address)$src$$constant;
    // need to do this the hard way until we can manage relocs
    // for 32 bit constants
    __ movoop(rscratch2, (jobject)con);
    // a null oop is already zero; only compress non-null constants
    if (con) __ encode_heap_oop_not_null(rscratch2);
    loadStore(_masm, &MacroAssembler::strw, rscratch2, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  // Store a narrow-klass constant: materialize, compress, store 32 bits.
  enc_class aarch64_enc_strw_immnk(immN src, memory4 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    address con = (address)$src$$constant;
    // need to do this the hard way until we can manage relocs
    // for 32 bit constants
    __ movoop(rscratch2, (jobject)con);
    __ encode_klass_not_null(rscratch2);
    loadStore(_masm, &MacroAssembler::strw, rscratch2, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  // Store a zero byte preceded by a StoreStore barrier (ordered store).
  enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
      C2_MacroAssembler _masm(&cbuf);
      __ membar(Assembler::StoreStore);
      loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // END Non-volatile memory access
 3119 
 3120   // END Non-volatile memory access
 3121 
  // Vector loads and stores

  // Load a 32-bit (S) vector element into a SIMD/FP register.
  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load a 64-bit (D) vector into a SIMD/FP register.
  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load a 128-bit (Q) vector into a SIMD/FP register.
  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store a 32-bit (S) vector element from a SIMD/FP register.
  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store a 64-bit (D) vector from a SIMD/FP register.
  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store a 128-bit (Q) vector from a SIMD/FP register.
  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
 3158 
  // volatile loads and stores
  //
  // These use the MOV_VOLATILE macro (defined earlier in this file) to
  // resolve the memory operand into a single register address (using
  // rscratch1 as the temporary) and emit the given acquire/release
  // instruction.  NOTE(review): the bare `__` after MOV_VOLATILE relies
  // on the macro declaring _masm in the enclosing scope -- confirm in
  // the macro definition.

  // store-release byte
  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  // store-release halfword
  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  // store-release word
  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}


  // load-acquire byte, sign-extended to 32 bits
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // load-acquire byte, sign-extended to 64 bits
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  // load-acquire byte, zero-extended (32-bit result)
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // load-acquire byte, zero-extended (64-bit result)
  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // load-acquire halfword, sign-extended to 32 bits
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // load-acquire halfword, sign-extended to 64 bits
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // load-acquire halfword, zero-extended (32-bit result)
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // load-acquire halfword, zero-extended (64-bit result)
  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // load-acquire word (32-bit result)
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // load-acquire word, zero-extended to 64 bits
  // NOTE(review): same enc_class name as the iRegI variant above --
  // mirrors upstream usage, but worth confirming ADLC resolves the
  // intended overload.
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // load-acquire doubleword
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // load-acquire float: load the 32 bits into rscratch1, then move to FP
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // load-acquire double: load the 64 bits into rscratch1, then move to FP
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
 3251 
  // store-release doubleword
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      C2_MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      // copy sp into a scratch register that stlr can encode as a source
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // store-release float: move the FP bits to rscratch2, then stlrw.
  // The inner scope keeps this _masm from clashing with the one that
  // MOV_VOLATILE introduces.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      C2_MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // store-release double: move the FP bits to rscratch2, then stlr.
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      C2_MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
 3285 
  // synchronized read/update encodings

  // Load-acquire exclusive doubleword.  ldaxr only takes a bare base
  // register, so any index/displacement is folded into rscratch1 with
  // lea first.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        // base + disp: compute the address, then load exclusive
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        // base + (index << scale)
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // base + disp + (index << scale): two lea steps
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
 3316 
  // Store-release exclusive doubleword.  Like ldaxr above, the address
  // is folded into rscratch2 when it is not a bare base register.  The
  // exclusive-store status lands in rscratch1; the final cmpw leaves
  // EQ set on success (status == 0) for the matching instruct rules.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    // set flags from the store-exclusive status (0 == success)
    __ cmpw(rscratch1, zr);
  %}
 3346 
  // Compare-and-exchange encodings.  All of them require a bare base
  // register address (no index/displacement) and delegate to
  // MacroAssembler::cmpxchg with release semantics; the plain variants
  // below do not acquire.  Passing noreg for the result means the
  // outcome is reported via the condition flags only.

  // CAS doubleword (release only)
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // CAS word (release only)
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // CAS halfword (release only)
  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // CAS byte (release only)
  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}


  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.

  // CAS doubleword (acquire + release)
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // CAS word (acquire + release)
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // CAS halfword (acquire + release)
  enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // CAS byte (acquire + release)
  enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3415 
 3416   // auxiliary used for CompareAndSwapX to set result register
 3417   enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
 3418     C2_MacroAssembler _masm(&cbuf);
 3419     Register res_reg = as_Register($res$$reg);
 3420     __ cset(res_reg, Assembler::EQ);
 3421   %}
 3422 
 3423   // prefetch encodings
 3424 
 3425   enc_class aarch64_enc_prefetchw(memory mem) %{
 3426     C2_MacroAssembler _masm(&cbuf);
 3427     Register base = as_Register($mem$$base);
 3428     int index = $mem$$index;
 3429     int scale = $mem$$scale;
 3430     int disp = $mem$$disp;
 3431     if (index == -1) {
 3432       __ prfm(Address(base, disp), PSTL1KEEP);
 3433     } else {
 3434       Register index_reg = as_Register(index);
 3435       if (disp == 0) {
 3436         __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3437       } else {
 3438         __ lea(rscratch1, Address(base, disp));
 3439 	__ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3440       }
 3441     }
 3442   %}
 3443 
  // mov encodings
 3445 
  // Move a 32-bit immediate into a register; zero goes via zr.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    C2_MacroAssembler _masm(&cbuf);
    uint32_t con = (uint32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // Move a 64-bit immediate into a register; zero goes via zr.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    uint64_t con = (uint64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}

  // Move a pointer constant, dispatching on its relocation type:
  // oops and metadata need reloc records; other addresses are either
  // small plain immediates or materialized with adrp/add.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      // null and 1 are handled by dedicated encodings (mov_p0/mov_p1)
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          // values inside the first (unmapped) page are not real
          // addresses; load them as plain immediates
          __ mov(dst_reg, con);
        } else {
          uintptr_t offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}

  // Move the null pointer constant (zero).
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Move the pointer constant 1.
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (uint64_t)1);
  %}

  // Load the card table's byte map base.
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    C2_MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}

  // Move a narrow (compressed) oop constant with its reloc record.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      // the null case is handled by aarch64_enc_mov_n0
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // Move the narrow null oop (zero).
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Move a narrow (compressed) klass constant with its reloc record.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
 3541 
  // arithmetic encodings

  // 32-bit add/subtract of an immediate.  A negative immediate is
  // flipped to the opposite operation so the encoded constant is
  // always non-negative.
  // NOTE(review): `con = -con` would overflow for INT_MIN; presumably
  // immIAddSub restricts the operand range -- confirm the operand
  // definition.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // 64-bit add/subtract of an immediate (same flipping scheme).
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
 3571 
 3572   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
 3573     C2_MacroAssembler _masm(&cbuf);
 3574    Register dst_reg = as_Register($dst$$reg);
 3575    Register src1_reg = as_Register($src1$$reg);
 3576    Register src2_reg = as_Register($src2$$reg);
 3577     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
 3578   %}
 3579 
 3580   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
 3581     C2_MacroAssembler _masm(&cbuf);
 3582    Register dst_reg = as_Register($dst$$reg);
 3583    Register src1_reg = as_Register($src1$$reg);
 3584    Register src2_reg = as_Register($src2$$reg);
 3585     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
 3586   %}
 3587 
 3588   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
 3589     C2_MacroAssembler _masm(&cbuf);
 3590    Register dst_reg = as_Register($dst$$reg);
 3591    Register src1_reg = as_Register($src1$$reg);
 3592    Register src2_reg = as_Register($src2$$reg);
 3593     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
 3594   %}
 3595 
 3596   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
 3597     C2_MacroAssembler _masm(&cbuf);
 3598    Register dst_reg = as_Register($dst$$reg);
 3599    Register src1_reg = as_Register($src1$$reg);
 3600    Register src2_reg = as_Register($src2$$reg);
 3601     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
 3602   %}
 3603 
  // compare instruction encodings

  // 32-bit register-register compare
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare against an add/sub-encodable immediate: subs for a
  // non-negative value, adds of the negation otherwise.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare against an arbitrary immediate, materialized in
  // rscratch1 first.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    uint32_t val = (uint32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  // 64-bit register-register compare
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare against a 12-bit add/sub immediate.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
      // aargh, Long.MIN_VALUE is a special case (it equals its own
      // negation), so materialize it and compare via a register
      __ orr(rscratch1, zr, (uint64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare against an arbitrary immediate, via rscratch1.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    uint64_t val = (uint64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}

  // pointer compare
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // narrow-oop compare (32-bit)
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // pointer null test
  enc_class aarch64_enc_testp(iRegP src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // narrow-oop null test
  enc_class aarch64_enc_testn(iRegN src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
 3687 
  // unconditional branch to a label
  enc_class aarch64_enc_b(label lbl) %{
    C2_MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // conditional branch (signed condition codes)
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    C2_MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // conditional branch (unsigned condition codes)
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    C2_MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
 3705 
  // Slow-path subtype check: delegates to check_klass_subtype_slow_path,
  // which branches to `miss` on failure and sets the condition codes.
  // NOTE(review): $primary appears to select a variant that zeroes the
  // result register on the hit path -- confirm against the instruct
  // rules that use this encoding.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     C2_MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
 3723 
  // Static Java call: a runtime-wrapper call when _method is null,
  // otherwise a relocated trampoline call plus a to-interpreter stub.
  // Records a compile failure (rather than asserting) when the code
  // cache is full.
  enc_class aarch64_enc_java_static_call(method meth) %{
    C2_MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    } else if (UseSVE > 0 && Compile::current()->max_vector_size() >= 16) {
      // Only non uncommon_trap calls need to reinitialize ptrue.
      if (uncommon_trap_request() == 0) {
        __ reinitialize_ptrue();
      }
    }
  %}

  // Virtual/interface Java call via an inline cache; reinitializes the
  // SVE ptrue predicate afterwards when SVE vectors are in use.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    C2_MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    } else if (UseSVE > 0 && Compile::current()->max_vector_size() >= 16) {
      __ reinitialize_ptrue();
    }
  %}
 3767 
  // Post-call epilog; the stack-depth verification is not implemented.
  enc_class aarch64_enc_call_epilog() %{
    C2_MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}

  // Call from compiled Java code into the runtime.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    C2_MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blr
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
      // (return address pushed in the upper slot of a two-word frame,
      // popped again immediately after the call returns)
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blr(rscratch1);
      __ bind(retaddr);
      __ add(sp, sp, 2 * wordSize);
    }
    // the callee may have clobbered the SVE all-true predicate
    if (UseSVE > 0 && Compile::current()->max_vector_size() >= 16) {
      __ reinitialize_ptrue();
    }
  %}
 3805 
  // Jump to the rethrow stub to re-raise a pending exception.
  enc_class aarch64_enc_rethrow() %{
    C2_MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}

  // Method return; in debug builds first checks the SVE ptrue
  // predicate is still intact when SVE vectors are in use.
  enc_class aarch64_enc_ret() %{
    C2_MacroAssembler _masm(&cbuf);
#ifdef ASSERT
    if (UseSVE > 0 && Compile::current()->max_vector_size() >= 16) {
      __ verify_ptrue();
    }
#endif
    __ ret(lr);
  %}

  // Tail call: indirect jump to the target.
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    C2_MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}

  // Tail jump used for exception forwarding.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    C2_MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
 3836 
  // Fast-path monitor enter.  Tries (optional) biased locking, then
  // stack-locking via CAS on the object's markWord, then the recursive
  // stack-lock case, and finally a CAS on the inflated monitor's owner
  // field.  On exit the condition flags encode the outcome for the
  // matcher: EQ = lock acquired, NE = caller must take the slow path.
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markWord from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Check for existing monitor (the monitor bit set means the lock is inflated).
    __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);

    // Set tmp to be (markWord of object | UNLOCK_VALUE).
    __ orr(tmp, disp_hdr, markWord::unlocked_value);

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markWord with an unlocked value (tmp) and if
    // equal exchange the stack address of our box with object markWord.
    // On failure disp_hdr contains the possibly locked markWord.
    __ cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
               /*release*/ true, /*weak*/ false, disp_hdr);
    __ br(Assembler::EQ, cont);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object; we have now locked it and execution continues at label cont.

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markWord of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
    // If condition is true we are at cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);   // Sets flags for result
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    __ b(cont);

    // Handle existing monitor.
    __ bind(object_has_monitor);

    // The object's monitor m is unlocked iff m->owner == NULL,
    // otherwise m->owner may contain a thread or a stack address.
    //
    // Try to CAS m->owner from NULL to current thread.
    __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markWord::monitor_value));
    __ cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
               /*release*/ true, /*weak*/ false, noreg); // Sets flags for result

    // Store a non-null value into the box to avoid looking like a re-entrant
    // lock. The fast-path monitor unlock code checks for
    // markWord::monitor_value so use markWord::unused_mark which has the
    // relevant bit set, and also matches ObjectSynchronizer::enter.
    __ mov(tmp, (address)markWord::unused_mark().value());
    __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
 3914 
  // Fast-path monitor exit: the inverse of aarch64_enc_fast_lock.  Handles
  // (optional) biased locking, recursive stack-unlock (displaced header is
  // zero), stack-unlock via CAS, and inflated-monitor exit.  On exit the
  // condition flags encode the outcome: EQ = unlocked, NE = slow path.
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;

    assert_different_registers(oop, box, tmp, disp_hdr);

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);

    // Handle existing monitor.  Testing disp_hdr works because the lock
    // fast path stores markWord::unused_mark (which has the monitor bit
    // set) into the box when the lock is inflated -- see fast_lock above.
    __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
    __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markWord of the
    // object.

    __ cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
               /*release*/ true, /*weak*/ false, tmp);
    __ b(cont);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // Handle existing monitor.
    __ bind(object_has_monitor);
    STATIC_ASSERT(markWord::monitor_value <= INT_MAX);
    __ add(tmp, tmp, -(int)markWord::monitor_value); // monitor
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
    __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
    __ cmp(rscratch1, zr); // Sets flags for result
    __ br(Assembler::NE, cont);

    // We own the monitor with no recursions.  If EntryList and cxq are both
    // empty, no thread is waiting and we can release by clearing the owner.
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
    __ cmp(rscratch1, zr); // Sets flags for result
    __ cbnz(rscratch1, cont);
    // need a release store here
    __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ stlr(zr, tmp); // set unowned

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
 3975 
 3976 %}
 3977 
 3978 //----------FRAME--------------------------------------------------------------
 3979 // Definition of frame structure and management information.
 3980 //
 3981 //  S T A C K   L A Y O U T    Allocators stack-slot number
 3982 //                             |   (to get allocators register number
 3983 //  G  Owned by    |        |  v    add OptoReg::stack0())
 3984 //  r   CALLER     |        |
 3985 //  o     |        +--------+      pad to even-align allocators stack-slot
 3986 //  w     V        |  pad0  |        numbers; owned by CALLER
 3987 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
 3988 //  h     ^        |   in   |  5
 3989 //        |        |  args  |  4   Holes in incoming args owned by SELF
 3990 //  |     |        |        |  3
 3991 //  |     |        +--------+
 3992 //  V     |        | old out|      Empty on Intel, window on Sparc
 3993 //        |    old |preserve|      Must be even aligned.
 3994 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
 3995 //        |        |   in   |  3   area for Intel ret address
 3996 //     Owned by    |preserve|      Empty on Sparc.
 3997 //       SELF      +--------+
 3998 //        |        |  pad2  |  2   pad to align old SP
 3999 //        |        +--------+  1
 4000 //        |        | locks  |  0
 4001 //        |        +--------+----> OptoReg::stack0(), even aligned
 4002 //        |        |  pad1  | 11   pad to align new SP
 4003 //        |        +--------+
 4004 //        |        |        | 10
 4005 //        |        | spills |  9   spills
 4006 //        V        |        |  8   (pad0 slot for callee)
 4007 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
 4008 //        ^        |  out   |  7
 4009 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
 4010 //     Owned by    +--------+
 4011 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
 4012 //        |    new |preserve|      Must be even-aligned.
 4013 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
 4014 //        |        |        |
 4015 //
 4016 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
 4017 //         known from SELF's arguments and the Java calling convention.
 4018 //         Region 6-7 is determined per call site.
// Note 2: If the calling convention leaves holes in the incoming argument
//         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
//         incoming area, as the Java calling convention is completely under
//         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
//         varargs C calling conventions.
 4026 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
 4027 //         even aligned with pad0 as needed.
 4028 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
 4029 //           (the latter is true on Intel but is it false on AArch64?)
 4030 //         region 6-11 is even aligned; it may be padded out more so that
 4031 //         the region from SP to FP meets the minimum stack alignment.
 4032 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
 4033 //         alignment.  Region 11, pad1, may be dynamically extended so that
 4034 //         SP meets the minimum alignment.
 4035 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or Method for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  // (one BasicObjectLock occupies two 32-bit slots).
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  // NOTE(review): R31 in this AD file's register numbering is the stack
  // pointer -- confirm against the register definitions at the top of the file.
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low half of the return register pair, indexed by ideal register type.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High half of the return register pair (OptoReg::Bad for 32-bit values).
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
 4139 
//----------ATTRIBUTES---------------------------------------------------------
// Default attribute values; individual operands/instructions below may
// override them.
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
 4157 
 4158 //----------OPERANDS-----------------------------------------------------------
 4159 // Operand definitions must precede instruction definitions for correct parsing
 4160 // in the ADLC because operands constitute user defined types which are used in
 4161 // instruction definitions.
 4162 
 4163 //----------Simple Operands----------------------------------------------------
 4164 
// Integer operands 32 bit
// 32 bit immediate (any value)
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift values for add/sub extension shift (0..4 inclusive)
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer less than or equal to 4 (no lower bound)
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 63
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 255 (low-byte mask)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 65535 (low-halfword mask)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4339 
// 64 bit constant 255 (low-byte mask)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 65535 (low-halfword mask)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 0xFFFFFFFF (low-word mask)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Non-zero 64 bit mask of contiguous low-order one bits (value + 1 is a
// power of two), with the top two bits clear.
operand immL_bitmask()
%{
  predicate((n->get_long() != 0)
            && ((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Non-zero 32 bit mask of contiguous low-order one bits, with the top two
// bits clear.
operand immI_bitmask()
%{
  predicate((n->get_int() != 0)
            && ((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Non-zero 64 bit mask of contiguous low-order one bits that also fits in
// a positive 32-bit value (< 0x80000000).
operand immL_positive_bitmaskI()
%{
  predicate((n->get_long() != 0)
            && ((julong)n->get_long() < 0x80000000ULL)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4405 
// Scale values for scaled offset addressing modes (up to long but not quad):
// 0..3 inclusive, i.e. shift by log2 of the access size.
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset (64-bit constant) -- for base plus immediate loads
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4459 
// Offset for scaled or unscaled immediate loads and stores.
// For the suffixed operands below, the numeric suffix is the access size
// in bytes; the second argument to offset_ok_for_immed() is log2 of that
// size.  The unsuffixed immIOffset/immLoffset use size-1 (shift 0), the
// same predicate as the "1" variants.
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for a 1-byte access
operand immIOffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for a 2-byte access
operand immIOffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for a 4-byte access
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for an 8-byte access
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for a 16-byte access
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64-bit offset variants of the above.
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for a 1-byte access
operand immLoffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for a 2-byte access
operand immLoffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for a 4-byte access
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for an 8-byte access
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for a 16-byte access
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4580 
// 8 bit signed value.
operand immI8()
%{
  predicate(n->get_int() <= 127 && n->get_int() >= -128);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit signed value (simm8), or #simm8 LSL 8 (an 8-bit value shifted
// left by 8 -- note 32512 == 127 << 8).
operand immI8_shift8()
%{
  predicate((n->get_int() <= 127 && n->get_int() >= -128) ||
            (n->get_int() <= 32512 && n->get_int() >= -32768 && (n->get_int() & 0xff) == 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit signed value (simm8), or #simm8 LSL 8 -- 64-bit variant.
operand immL8_shift8()
%{
  predicate((n->get_long() <= 127 && n->get_long() >= -128) ||
            (n->get_long() <= 32512 && n->get_long() >= -32768 && (n->get_long() & 0xff) == 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((int64_t)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4637 
// Integer operands 64 bit
// 64 bit immediate (any value)
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant equal to the byte offset of last_Java_pc within the thread's
// JavaFrameAnchor (the "32 bit offset of pc in thread anchor").
operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (uint64_t)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4724 
// Pointer operands
// Pointer Immediate (any value)
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
// Matches exactly the card table's byte_map_base when a card-table
// barrier set is in use.
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            (CardTable::CardValue*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4795 
// Float and Double operands
// Double Immediate (any value)
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double immediate encodable in an instruction's floating-point immediate
// field (see Assembler::operand_valid_for_float_immediate).
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate (any value)
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float immediate encodable in an instruction's floating-point immediate
// field (see Assembler::operand_valid_for_float_immediate).
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4856 
// Narrow pointer operands
// Narrow Pointer Immediate (compressed oop constant)
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate (compressed klass constant)
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4887 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
// NOTE(review): unlike its sibling operands this one carries no explicit
// op_cost(0) and so relies on the ADLC default cost -- confirm intended.
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  format %{ %}
  interface(REG_INTER);
%}
 4931 
// Pointer Register Operands
// Pointer Register: general pointer operand; also accepts the more
// constrained pointer operands listed in its match rules.
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
// (the disabled match rules below are intentionally commented out)
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4964 
// The iRegP_R<n> operands below each constrain allocation to one specific
// general-purpose register (used where the matched instruction or calling
// convention requires a fixed register).

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5048 
// Fixed-register long operands: allocation is pinned to the named register.

// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only (the frame pointer)
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5103 
// Fixed-register 32-bit integer operands: allocation is pinned to the
// named register.

// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5148 
 5149 
// Pointer Register Operands
// Narrow Pointer Register (compressed oop held in a 32-bit view)
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R0 only
operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R2 only
operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R3 only
operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5209 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector operand of architecture-dependent length
// (vectora_reg class -- presumably the SVE scalable-vector case; confirm
// against the register-class definitions earlier in this file)
operand vecA()
%{
  constraint(ALLOC_IN_RC(vectora_reg));
  match(VecA);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64-bit (D-sized) vector operand
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128-bit (X/Q-sized) vector operand
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5262 
// Fixed double-register operands V0..V31: each constrains allocation to a
// single named SIMD/FP register (v<n>_reg class).

operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V4()
%{
  constraint(ALLOC_IN_RC(v4_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V5()
%{
  constraint(ALLOC_IN_RC(v5_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V6()
%{
  constraint(ALLOC_IN_RC(v6_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V7()
%{
  constraint(ALLOC_IN_RC(v7_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V8()
%{
  constraint(ALLOC_IN_RC(v8_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V9()
%{
  constraint(ALLOC_IN_RC(v9_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V10()
%{
  constraint(ALLOC_IN_RC(v10_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V11()
%{
  constraint(ALLOC_IN_RC(v11_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V12()
%{
  constraint(ALLOC_IN_RC(v12_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V13()
%{
  constraint(ALLOC_IN_RC(v13_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V14()
%{
  constraint(ALLOC_IN_RC(v14_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V15()
%{
  constraint(ALLOC_IN_RC(v15_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V16()
%{
  constraint(ALLOC_IN_RC(v16_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V17()
%{
  constraint(ALLOC_IN_RC(v17_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V18()
%{
  constraint(ALLOC_IN_RC(v18_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V19()
%{
  constraint(ALLOC_IN_RC(v19_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V20()
%{
  constraint(ALLOC_IN_RC(v20_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V21()
%{
  constraint(ALLOC_IN_RC(v21_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V22()
%{
  constraint(ALLOC_IN_RC(v22_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V23()
%{
  constraint(ALLOC_IN_RC(v23_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V24()
%{
  constraint(ALLOC_IN_RC(v24_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V25()
%{
  constraint(ALLOC_IN_RC(v25_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V26()
%{
  constraint(ALLOC_IN_RC(v26_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V27()
%{
  constraint(ALLOC_IN_RC(v27_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V28()
%{
  constraint(ALLOC_IN_RC(v28_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V29()
%{
  constraint(ALLOC_IN_RC(v29_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V30()
%{
  constraint(ALLOC_IN_RC(v30_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V31()
%{
  constraint(ALLOC_IN_RC(v31_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5550 
// Vector-mask (predicate) register operand, allocated from the
// governing-predicate class (gov_pr) -- presumably SVE governing
// predicates p0..p7; confirm against the register definitions.
operand pRegGov()
%{
  constraint(ALLOC_IN_RC(gov_pr));
  match(RegVMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5559 
 5560 // Flags register, used as output of signed compare instructions
 5561 
// note that on AArch64 we also use this register as the output
// for floating point compare instructions (CmpF CmpD). this ensures
 5564 // that ordered inequality tests use GT, GE, LT or LE none of which
 5565 // pass through cases where the result is unordered i.e. one or both
 5566 // inputs to the compare is a NaN. this means that the ideal code can
 5567 // replace e.g. a GT with an LE and not end up capturing the NaN case
 5568 // (where the comparison should always fail). EQ and NE tests are
 5569 // always generated in ideal code so that unordered folds into the NE
 5570 // case, matching the behaviour of AArch64 NE.
 5571 //
 5572 // This differs from x86 where the outputs of FP compares use a
 5573 // special FP flags registers and where compares based on this
 5574 // register are distinguished into ordered inequalities (cmpOpUCF) and
 5575 // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
 5576 // to explicitly handle the unordered case in branches. x86 also has
 5577 // to include extra CMoveX rules to accept a cmpOpUCF input.
 5578 
// Flags register for signed integral and fp compares (see the discussion
// above on why fp compares share this operand on AArch64).
operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
// (same int_flags class; the distinct operand selects unsigned condition
// codes in the rules that consume it)
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
 5599 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Interpreter method-oop register (same method_reg class)
operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread register (was mislabelled "link_reg")
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5641 
 5642 //----------Memory Operands----------------------------------------------------
 5643 
// Base-register-only addressing: [reg].
// In the MEMORY_INTER descriptions below, index(0xffffffff) is the
// sentinel for "no index register".
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// Base + sign-extended-int index, shifted: [reg, wN sxtw #scale].
// The predicate checks the scale fits every memory use of this address.
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// Base + long index, shifted: [reg, xN lsl #scale].
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// Base + sign-extended-int index, unscaled.
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Base + long index, unscaled: [reg, xN].
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
 5715 
// Base + immediate int offset: [reg, #off]. The indOffI<N> variants
// differ only in the immediate-offset operand type, which presumably
// restricts the offset range/alignment for an N-byte access -- see the
// immIOffset* definitions.
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI1(iRegP reg, immIOffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI2(iRegP reg, immIOffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5799 
// Base + immediate long offset: [reg, #off]. As with indOffI<N>, the
// indOffL<N> variants presumably restrict offset range/alignment for
// an N-byte access via the immLoffset* operand types.
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL1(iRegP reg, immLoffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL2(iRegP reg, immLoffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5883 
// Narrow-oop addressing forms: these match a DecodeN of a narrow oop used
// directly as the base. They are only legal when CompressedOops::shift()
// is zero, i.e. the narrow-oop bits decode to an address without shifting.
operand indirectN(iRegN reg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5988 
 5989 
 5990 
// AArch64 opto stubs need to write to the pc slot in the thread anchor:
// address form is [thread_reg, #immL_pc_off].
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 6005 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
// NOTE(review): the "RSP" comments below are x86 terminology carried over;
// base 0x1e presumably encodes the AArch64 stack pointer -- confirm against
// the register encodings earlier in this file.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
 6080 
 6081 // Operands for expressing Control Flow
 6082 // NOTE: Label is a predefined operand which should not be redefined in
 6083 //       the AD file. It is generically handled within the ADLC.
 6084 
 6085 //----------Conditional Branch Operands----------------------------------------
 6086 // Comparison Op  - This is the operation of the comparison, and is limited to
 6087 //                  the following set of codes:
 6088 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
 6089 //
 6090 // Other attributes of the comparison, such as unsignedness, are specified
 6091 // by the comparison instruction that sets a condition code flags register.
 6092 // That result is represented by a flags operand whose subtype is appropriate
 6093 // to the unsignedness (etc.) of the comparison.
 6094 //
 6095 // Later, the instruction which matches both the Comparison Op (a Bool) and
 6096 // the flags (produced by the Cmp) specifies the coding of the comparison op
 6097 // by matching a specific subtype of Bool operand below, such as cmpOpU.
 6098 
// used for signed integral comparisons and fp comparisons
// (the hex values are AArch64 condition-code encodings: eq=0x0, ne=0x1,
// lt=0xb, ge=0xa, le=0xd, gt=0xc, vs=0x6, vc=0x7)

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons
// (unsigned condition codes: lo=0x3, hs=0x2, ls=0x9, hi=0x8)

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 6136 
 6137 // used for certain integral comparisons which can be
 6138 // converted to cbxx or tbxx instructions
 6139 
operand cmpOpEqNe()
%{
  match(Bool);
  op_cost(0);
  // Restricted to eq/ne tests only; the predicate lets the matcher
  // select compare-and-branch (cbz/cbnz) or test-bit (tbz/tbnz) rules.
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 6159 
 6160 // used for certain integral comparisons which can be
 6161 // converted to cbxx or tbxx instructions
 6162 
operand cmpOpLtGe()
%{
  match(Bool);
  op_cost(0);

  // Restricted to lt/ge tests only; lt/ge against zero reduce to a
  // sign-bit test, enabling cbxx/tbxx forms.
  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 6183 
 6184 // used for certain unsigned integral comparisons which can be
 6185 // converted to cbxx or tbxx instructions
 6186 
operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  op_cost(0);

  // Restricted to eq/ne/lt/ge tests for unsigned compare patterns.
  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  // NOTE(review): less/greater_equal carry the SIGNED encodings
  // (0xb/0xa) although the heading says unsigned — presumably the
  // matching rules emit cbxx/tbxx and never this cond field for the
  // ordered tests; confirm against the instruct rules that use it.
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 6209 
 6210 // Special operand allowing long args to int ops to be truncated for free
 6211 
operand iRegL2I(iRegL reg) %{

  // Zero cost: the truncation is implicit in 32-bit instructions,
  // which read only the low 32 bits of the source register.
  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}
 6222 
// Addressing modes legal for vector loads/stores of 4, 8 and 16
// bytes respectively (base, base+index, or base+scaled-immediate
// offset for the given access size).
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
 6226 
 6227 //----------OPERAND CLASSES----------------------------------------------------
 6228 // Operand Classes are groups of operands that are used as to simplify
 6229 // instruction definitions by not requiring the AD writer to specify
 6230 // separate instructions for every form of operand when the
 6231 // instruction accepts multiple operand types with the same basic
 6232 // encoding and format. The classic case of this is memory operands.
 6233 
 6234 // memory is used to define read/write location for load/store
 6235 // instruction defs. we can turn a memory op into an Address
 6236 
// The numeric suffix is the access size in bytes; it selects which
// scaled-immediate-offset operands (indOffI*/indOffL*) are legal.
opclass memory1(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI1, indOffL1,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN);

opclass memory2(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI2, indOffL2,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN);

opclass memory4(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI4, indOffL4,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

opclass memory8(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

// All of the memory operands. For the pipeline description.
opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex,
               indOffI1, indOffL1, indOffI2, indOffL2, indOffI4, indOffL4, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);
 6253 
 6254 
 6255 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
 6256 // operations. it allows the src to be either an iRegI or a (ConvL2I
 6257 // iRegL). in the latter case the l2i normally planted for a ConvL2I
 6258 // can be elided because the 32-bit instruction will just employ the
 6259 // lower 32 bits anyway.
 6260 //
 6261 // n.b. this does not elide all L2I conversions. if the truncated
 6262 // value is consumed by more than one operation then the ConvL2I
 6263 // cannot be bundled into the consuming nodes so an l2i gets planted
 6264 // (actually a movw $dst $src) and the downstream instructions consume
 6265 // the result of the l2i as an iRegI input. That's a shame since the
 6266 // movw is actually redundant but its not too costly.
 6267 
// Either a plain 32-bit register or a free long-to-int truncation.
opclass iRegIorL2I(iRegI, iRegL2I);
 6269 
 6270 //----------PIPELINE-----------------------------------------------------------
 6271 // Rules which define the behavior of the target architectures pipeline.
 6272 
// For specific pipelines, eg A53, define the stages of that pipeline
//pipe_desc(ISS, EX1, EX2, WR);
// Aliases mapping the generic S0..S5 stages (declared in pipe_desc
// below) onto A53-style stage names: issue, execute 1/2, writeback.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
 6279 
 6280 // Integer ALU reg operation
pipeline %{

// Global pipeline attributes, modelled on a dual-issue in-order core
// (Cortex-A53 style).
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
 6295 
// We don't use an actual pipeline model so don't care about resources
// or description. we do use pipeline classes to introduce fixed
// latencies

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

// INS0/INS1 model the two issue slots of the dual-issue front end;
// INS01 means "either slot", INS0 "slot 0 only".
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);
 6316 
 6317 //----------PIPELINE CLASSES---------------------------------------------------
 6318 // Pipeline Classes describe the stages in which input and output are
 6319 // referenced by the hardware pipeline.
 6320 
// Scalar FP pipeline classes. All occupy the NEON/FP unit with the
// result available at S5; most may issue in either slot (INS01).

// FP dyadic op, single precision (e.g. fadds)
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP dyadic op, double precision
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, single precision (e.g. fnegs, fabss)
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, double precision
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP conversions between precisions and between FP and integer
// registers; all share the same S1-read / S5-write timing.
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// NOTE(review): src is declared iRegIorL2I although an l2d
// conversion reads a long — presumably harmless since pipe-class
// operand types are documentation only; confirm (cf. fp_l2f above
// which uses iRegL).
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
 6448 
// FP divide, single precision. INS0: issues in slot 0 only.
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP conditional select, single precision: reads the flags plus both
// sources at S1, result at S3 (faster than the arithmetic ops).
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move-immediate (no source operands)
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant load (from the constant pool)
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
 6522 
// Vector pipeline classes. Convention throughout: the 64-bit (vecD)
// form may issue in either slot (INS01), the 128-bit (vecX) form is
// restricted to slot 0 (INS0).

// Vector multiply, 64-bit
pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply, 128-bit
pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 64-bit. dst is listed both as S5
// write and S1 read because the accumulator is an input.
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 128-bit
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector integer dyadic op (e.g. add/sub), 64-bit
pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

// Vector integer dyadic op, 128-bit
pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}

// Vector logical op (and/orr/eor), 64-bit
pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector logical op, 128-bit
pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}
 6604 
// Vector shift by register, 64-bit (shift counts live in a vector reg)
pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by register, 128-bit
pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 64-bit (immediate needs no read stage)
pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 128-bit
pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector FP dyadic op (e.g. fadd), 64-bit
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP dyadic op, 128-bit
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
 6662 
// Vector FP multiply/divide, 64-bit. Slot 0 only even in the 64-bit
// form (unlike the other vecD classes).
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 128-bit
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP square root, 128-bit
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 64-bit
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 128-bit
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Duplicate general register into all lanes, 64-bit vector
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate general register into all lanes, 128-bit vector
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
 6727 
// Duplicate FP register lane into all lanes, 64-bit vector
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate FP register lane into all lanes, 128-bit vector
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate double-precision lane into both lanes, 128-bit vector
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate, 64-bit (no source operands)
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate, 128-bit
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector load, 64-bit; address is consumed at issue
pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector load, 128-bit
pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector store, 64-bit
pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
 6797 
 6798 pipe_class vstore_reg_mem128(vecD src, vmem16 mem)
 6799 %{
 6800   single_instruction;
 6801   mem    : ISS(read);
 6802   src    : S2(read);
 6803   INS01  : ISS;
 6804   NEON_FP : S3;
 6805 %}
 6806 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
 6904 
//------- Compare operation -------------------------------

// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-immediate
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
 6931 
//------- Conditional instructions ------------------------

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// EG.  CSEL    X0, X1, zr, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
 6969 
//------- Multiply pipeline operations --------------------

// Multiply reg-reg (32 bit)
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (32 bit)
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply reg-reg (64 bit)
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (64 bit)
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
 7022 
//------- Divide pipeline operations --------------------

// 32 bit divide
// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// 64 bit divide
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
 7048 
//------- Load pipeline operations ------------------------

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
 7082 
//------- Store pipeline operations -----------------------

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
// Note: "dst" here is the index register of the address (read at
// issue), not a written destination.
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
 7116 
//------- Branch pipeline operations ----------------------

// Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
 7145 
//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
 7169 
// Generic fallback pipeline classes used by rules that have no
// precise timing model; they only supply a fixed latency.

// Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;
%}

%}
 7211 //----------INSTRUCTIONS-------------------------------------------------------
 7212 //
 7213 // match      -- States which machine-independent subtree may be replaced
 7214 //               by this instruction.
 7215 // ins_cost   -- The estimated cost of this instruction is used by instruction
 7216 //               selection to identify a minimum cost tree of machine
 7217 //               instructions that matches a tree of machine-independent
 7218 //               instructions.
 7219 // format     -- A string providing the disassembly for this instruction.
 7220 //               The value of an instruction's operand may be inserted
 7221 //               by referring to it with a '$' prefix.
 7222 // opcode     -- Three instruction opcodes may be provided.  These are referred
 7223 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
 7225 //               indicate the type of machine instruction, while secondary
 7226 //               and tertiary are often used for prefix options or addressing
 7227 //               modes.
 7228 // ins_encode -- A list of encode classes with parameters. The encode class
 7229 //               name must have been defined in an 'enc_class' specification
 7230 //               in the encode section of the architecture description.
 7231 
 7232 // ============================================================================
 7233 // Memory (Load/Store) Instructions
 7234 
 7235 // Load Instructions
 7236 
 7237 // Load Byte (8 bit signed)
// Sign-extending byte load into a 32-bit register. Only matches
// loads that do not need acquire semantics.
instruct loadB(iRegINoSp dst, memory1 mem)
%{
  match(Set dst (LoadB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7250 
 7251 // Load Byte (8 bit signed) into long
// Sign-extending byte load directly into a long register: the
// ConvI2L is folded into the ldrsb, so no separate extend is emitted.
instruct loadB2L(iRegLNoSp dst, memory1 mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7264 
 7265 // Load Byte (8 bit unsigned)
// Zero-extending byte load into a 32-bit register.
instruct loadUB(iRegINoSp dst, memory1 mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7278 
 7279 // Load Byte (8 bit unsigned) into long
// Zero-extending byte load into a long; ldrb already clears the
// upper bits, so the ConvI2L is free.
instruct loadUB2L(iRegLNoSp dst, memory1 mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7292 
// Load Short (16 bit signed) into an int register, sign-extending (ldrshw).
instruct loadS(iRegINoSp dst, memory2 mem)
%{
  match(Set dst (LoadS mem));
  // Plain (non-acquiring) loads only; loadS_volatile matches the acquiring case.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7306 
// Load Short (16 bit signed) into long, sign-extending to 64 bits (ldrsh).
instruct loadS2L(iRegLNoSp dst, memory2 mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  // The LoadS is input 1 of the matched ConvI2L node; it must be non-acquiring.
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7320 
// Load Char (16 bit unsigned) into an int register, zero-extending (ldrh).
instruct loadUS(iRegINoSp dst, memory2 mem)
%{
  match(Set dst (LoadUS mem));
  // Plain (non-acquiring) loads only; loadUS_volatile matches the acquiring case.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7334 
// Load Short/Char (16 bit unsigned) into long, zero-extending (ldrh).
instruct loadUS2L(iRegLNoSp dst, memory2 mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  // The LoadUS is input 1 of the matched ConvI2L node; it must be non-acquiring.
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7348 
// Load Integer (32 bit signed) into an int register (ldrw).
instruct loadI(iRegINoSp dst, memory4 mem)
%{
  match(Set dst (LoadI mem));
  // Plain (non-acquiring) loads only; loadI_volatile matches the acquiring case.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7362 
// Load Integer (32 bit signed) into long, sign-extending to 64 bits (ldrsw).
instruct loadI2L(iRegLNoSp dst, memory4 mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  // The LoadI is input 1 of the matched ConvI2L node; it must be non-acquiring.
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7376 
// Load Integer (32 bit unsigned) into long: an (AndL (ConvI2L (LoadI ...)) 0xFFFFFFFF)
// pattern collapses to a single zero-extending 32-bit load (ldrw).
instruct loadUI2L(iRegLNoSp dst, memory4 mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  // Navigate AndL -> ConvI2L -> LoadI to test the underlying load node.
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7390 
// Load Long (64 bit signed) into a long register (ldr).
instruct loadL(iRegLNoSp dst, memory8 mem)
%{
  match(Set dst (LoadL mem));
  // Plain (non-acquiring) loads only; loadL_volatile matches the acquiring case.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // NOTE(review): the "# int" annotation below is debug-disassembly text only;
  // this is a 64-bit load.
  format %{ "ldr  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7404 
// Load Range (array length) as a 32-bit int (ldrw).
instruct loadRange(iRegINoSp dst, memory4 mem)
%{
  // No acquire predicate: LoadRange reads an immutable array header field.
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7417 
// Load Pointer (64 bit) into a pointer register (ldr).
instruct loadP(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadP mem));
  // Non-acquiring loads with no attached GC barrier data only; barrier-carrying
  // loads are matched by GC-specific rules elsewhere.
  predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7431 
// Load Compressed Pointer (32-bit narrow oop) with ldrw.
instruct loadN(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadN mem));
  // Plain (non-acquiring) loads only; loadN_volatile matches the acquiring case.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7445 
// Load Klass Pointer (64 bit) with ldr.
instruct loadKlass(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadKlass mem));
  // Plain (non-acquiring) loads only.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7459 
// Load Narrow Klass Pointer (32-bit compressed class pointer) with ldrw.
instruct loadNKlass(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadNKlass mem));
  // Plain (non-acquiring) loads only.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 7473 
// Load Float (32 bit) into an FP register (ldrs).
instruct loadF(vRegF dst, memory4 mem)
%{
  match(Set dst (LoadF mem));
  // Plain (non-acquiring) loads only; loadF_volatile matches the acquiring case.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
 7487 
// Load Double (64 bit) into an FP register (ldrd).
instruct loadD(vRegD dst, memory8 mem)
%{
  match(Set dst (LoadD mem));
  // Plain (non-acquiring) loads only; loadD_volatile matches the acquiring case.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
 7501 
 7502 
// Load Int Constant: materialize a 32-bit immediate into an int register.
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}
 7515 
// Load Long Constant: materialize a 64-bit immediate into a long register.
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}
 7528 
// Load Pointer Constant: materialize an arbitrary 64-bit pointer constant
// (may take several instructions, hence the higher cost).

instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}
 7544 
// Load Null Pointer Constant (single cheap instruction).

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
 7558 
 7559 // Load Pointer Constant One
 7560 
 7561 instruct loadConP1(iRegPNoSp dst, immP_1 con)
 7562 %{
 7563   match(Set dst con);
 7564 
 7565   ins_cost(INSN_COST);
 7566   format %{ "mov  $dst, $con\t# NULL ptr" %}
 7567 
 7568   ins_encode(aarch64_enc_mov_p1(dst, con));
 7569 
 7570   ins_pipe(ialu_imm);
 7571 %}
 7572 
// Load Byte Map Base Constant (card-table base) via a PC-relative adr.

instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}
 7586 
// Load Narrow Pointer Constant (compressed oop immediate).

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}
 7600 
// Load Narrow Null Pointer Constant (compressed null, single instruction).

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}
 7614 
// Load Narrow Klass Constant (compressed class pointer immediate).

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
 7628 
// Load Packed Float Constant: float immediates encodable in fmov's 8-bit
// packed form are materialized directly, avoiding a constant-table load.

instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    // The assembler's immediate-form fmovs takes the value as a double.
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}
 7641 
// Load Float Constant: general float constants come from the constant table.

instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}
 7659 
// Load Packed Double Constant: double immediates encodable in fmov's 8-bit
// packed form are materialized directly, avoiding a constant-table load.

instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
 7672 
 7673 // Load Double Constant
 7674 
 7675 instruct loadConD(vRegD dst, immD con) %{
 7676   match(Set dst con);
 7677 
 7678   ins_cost(INSN_COST * 5);
 7679   format %{
 7680     "ldrd $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
 7681   %}
 7682 
 7683   ins_encode %{
 7684     __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
 7685   %}
 7686 
 7687   ins_pipe(fp_load_constant_d);
 7688 %}
 7689 
 7690 // Store Instructions
 7691 
// Store CMS card-mark Immediate: strb of the zero register; the leading
// StoreStore barrier is elided (cf. storeimmCM0_ordered for the fenced form).
instruct storeimmCM0(immI0 zero, memory1 mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST);
  format %{ "storestore (elided)\n\t"
            "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
 7705 
// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking.
// Emits a dmb ishst before the zero-byte store; costed higher than the
// elided form so the matcher prefers storeimmCM0 where both apply.
instruct storeimmCM0_ordered(immI0 zero, memory1 mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "storestore\n\t"
            "dmb ishst"
            "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}
 7721 
// Store Byte (strb).
instruct storeB(iRegIorL2I src, memory1 mem)
%{
  match(Set mem (StoreB mem src));
  // Plain (non-releasing) stores only; storeB_volatile matches the releasing case.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7735 
 7736 
 7737 instruct storeimmB0(immI0 zero, memory1 mem)
 7738 %{
 7739   match(Set mem (StoreB mem zero));
 7740   predicate(!needs_releasing_store(n));
 7741 
 7742   ins_cost(INSN_COST);
 7743   format %{ "strb rscractch2, $mem\t# byte" %}
 7744 
 7745   ins_encode(aarch64_enc_strb0(mem));
 7746 
 7747   ins_pipe(istore_mem);
 7748 %}
 7749 
// Store Char/Short (strh).
instruct storeC(iRegIorL2I src, memory2 mem)
%{
  match(Set mem (StoreC mem src));
  // Plain (non-releasing) stores only; storeC_volatile matches the releasing case.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7763 
// Store Char/Short of constant zero via the zero register (strh zr).
instruct storeimmC0(immI0 zero, memory2 mem)
%{
  match(Set mem (StoreC mem zero));
  // Plain (non-releasing) stores only.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}
 7776 
// Store Integer (strw).

instruct storeI(iRegIorL2I src, memory4 mem)
%{
  match(Set mem(StoreI mem src));
  // Plain (non-releasing) stores only; storeI_volatile matches the releasing case.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7791 
// Store Integer of constant zero via the zero register (strw zr).
instruct storeimmI0(immI0 zero, memory4 mem)
%{
  match(Set mem(StoreI mem zero));
  // Plain (non-releasing) stores only.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
 7804 
// Store Long (64 bit signed) (str).
instruct storeL(iRegL src, memory8 mem)
%{
  match(Set mem (StoreL mem src));
  // Plain (non-releasing) stores only; storeL_volatile matches the releasing case.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // NOTE(review): "# int" below is debug text only; this is a 64-bit store.
  format %{ "str  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7818 
// Store Long of constant zero via the zero register (str zr).
instruct storeimmL0(immL0 zero, memory8 mem)
%{
  match(Set mem (StoreL mem zero));
  // Plain (non-releasing) stores only.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
 7832 
// Store Pointer (64 bit, str).
instruct storeP(iRegP src, memory8 mem)
%{
  match(Set mem (StoreP mem src));
  // Plain (non-releasing) stores only; storeP_volatile matches the releasing case.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7846 
// Store null Pointer constant via the zero register (str zr).
instruct storeimmP0(immP0 zero, memory8 mem)
%{
  match(Set mem (StoreP mem zero));
  // Plain (non-releasing) stores only.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
 7860 
// Store Compressed Pointer (32-bit narrow oop, strw).
instruct storeN(iRegN src, memory4 mem)
%{
  match(Set mem (StoreN mem src));
  // Plain (non-releasing) stores only; storeN_volatile matches the releasing case.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7874 
// Store compressed null constant via the zero register (strw zr).
instruct storeImmN0(immN0 zero, memory4 mem)
%{
  match(Set mem (StoreN mem zero));
  // Plain (non-releasing) stores only.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
 7887 
// Store Float (strs).
instruct storeF(vRegF src, memory4 mem)
%{
  match(Set mem (StoreF mem src));
  // Plain (non-releasing) stores only; storeF_volatile matches the releasing case.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 7901 
 7902 // TODO
 7903 // implement storeImmF0 and storeFImmPacked
 7904 
// Store Double (strd).
instruct storeD(vRegD src, memory8 mem)
%{
  match(Set mem (StoreD mem src));
  // Plain (non-releasing) stores only; storeD_volatile matches the releasing case.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 7918 
// Store Compressed Klass Pointer (strw).
// NOTE(review): predicate appears before match here, unlike the sibling rules;
// order is not semantically significant to ADLC, but is inconsistent in style.
instruct storeNKlass(iRegN src, memory4 mem)
%{
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7932 
 7933 // TODO
 7934 // implement storeImmD0 and storeDImmPacked
 7935 
 7936 // prefetch instructions
 7937 // Must be safe to execute with invalid address (cannot fault).
 7938 
// Prefetch for allocation: PRFM PSTL1KEEP hint; must be safe with an
// invalid address (prefetch never faults).
instruct prefetchalloc( memory8 mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
 7949 
 7950 //  ---------------- volatile loads and stores ----------------
 7951 
// Load Byte (8 bit signed), acquiring (ldarsb). Picked up for loads excluded
// by the !needs_acquiring_load predicate on the plain loadB rule.
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}
 7964 
// Load Byte (8 bit signed) into long, acquiring (ldarsb).
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}
 7977 
// Load Byte (8 bit unsigned), acquiring (ldarb).
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}
 7990 
// Load Byte (8 bit unsigned) into long, acquiring (ldarb).
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}
 8003 
// Load Short (16 bit signed), acquiring (ldarshw).
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}
 8016 
// Load Char (16 bit unsigned), acquiring (ldarhw).
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}
 8028 
// Load Short/Char (16 bit unsigned) into long, acquiring (ldarh).
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
 8041 
 8042 // Load Short/Char (16 bit signed) into long
 8043 instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 8044 %{
 8045   match(Set dst (ConvI2L (LoadS mem)));
 8046 
 8047   ins_cost(VOLATILE_REF_COST);
 8048   format %{ "ldarh  $dst, $mem\t# short" %}
 8049 
 8050   ins_encode(aarch64_enc_ldarsh(dst, mem));
 8051 
 8052   ins_pipe(pipe_serial);
 8053 %}
 8054 
// Load Integer (32 bit signed), acquiring (ldarw).
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
 8067 
// Load Integer (32 bit unsigned) into long, acquiring: the masked ConvI2L
// pattern collapses to a single zero-extending ldarw.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
 8080 
// Load Long (64 bit signed), acquiring (ldar).
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // NOTE(review): "# int" below is debug text only; this is a 64-bit load.
  format %{ "ldar  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
 8093 
// Load Pointer, acquiring (ldar).
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));
  // Only when no GC barrier data is attached to the load.
  predicate(n->as_Load()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
 8107 
// Load Compressed Pointer, acquiring (ldarw).
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
 8120 
// Load Float, acquiring (via the fldars encoding).
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}
 8133 
// Load Double, acquiring (via the fldard encoding).
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
 8146 
// Store Byte, releasing (stlrb). Picked up for stores excluded by the
// !needs_releasing_store predicate on the plain storeB rule.
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}
 8159 
// Store Char/Short, releasing (stlrh).
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}
 8172 
// Store Integer, releasing (stlrw).

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
 8186 
// Store Long (64 bit signed), releasing (stlr).
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  // NOTE(review): "# int" below is debug text only; this is a 64-bit store.
  format %{ "stlr  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
 8199 
// Store Pointer, releasing (stlr).
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
 8212 
// Store Compressed Pointer, releasing (stlrw).
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
 8225 
// Store Float, releasing (via the fstlrs encoding).
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 8238 
 8239 // TODO
 8240 // implement storeImmF0 and storeFImmPacked
 8241 
// Store Double, releasing (via the fstlrd encoding).
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 8254 
 8255 //  ---------------- end of volatile loads and stores ----------------
 8256 
// Cache line write-back for a single address (used e.g. by persistent-memory
// flush intrinsics); only available when the CPU supports DC flush to PoP/PoC.
instruct cacheWB(indirect addr)
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWB addr);

  ins_cost(100);
  format %{"cache wb $addr" %}
  ins_encode %{
    // The matched address must be a plain base register: no index, no offset.
    assert($addr->index_position() < 0, "should be");
    assert($addr$$disp == 0, "should be");
    __ cache_wb(Address($addr$$base$$Register, 0));
  %}
  ins_pipe(pipe_slow); // XXX
%}
 8271 
// Ordering barrier emitted before a sequence of cache write-backs.
instruct cacheWBPreSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPreSync);

  ins_cost(100);
  format %{"cache wb presync" %}
  ins_encode %{
    __ cache_wbsync(true);  // true = pre-sync
  %}
  ins_pipe(pipe_slow); // XXX
%}
 8284 
// Ordering barrier emitted after a sequence of cache write-backs.
instruct cacheWBPostSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPostSync);

  ins_cost(100);
  format %{"cache wb postsync" %}
  ins_encode %{
    __ cache_wbsync(false);  // false = post-sync
  %}
  ins_pipe(pipe_slow); // XXX
%}
 8297 
 8298 // ============================================================================
 8299 // BSWAP Instructions
 8300 
// Byte-swap a 32-bit int (revw).
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
 8313 
// Byte-swap a 64-bit long (rev).
instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
 8326 
// Byte-swap an unsigned 16-bit value (rev16w); no extension needed.
instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
 8339 
// Byte-swap a signed 16-bit value: rev16w then sign-extend bits 0..15
// back to 32 bits with sbfmw.
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
 8354 
 8355 // ============================================================================
 8356 // Zero Count Instructions
 8357 
// Count leading zeros of a 32-bit int (clzw).
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
 8369 
// Count leading zeros of a 64-bit long (clz); result is an int.
instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
 8381 
// Count trailing zeros of a 32-bit int: bit-reverse (rbitw) then count
// leading zeros (clzw) — AArch64 has no direct trailing-zero instruction.
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
 8395 
// Count trailing zeros of a 64-bit long: rbit then clz (see 32-bit variant).
instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
 8409 
 8410 //---------- Population Count Instructions -------------------------------------
 8411 //
 8412 
// Population count of a 32-bit int via the SIMD CNT/ADDV path:
// move to a vector register, count bits per byte, sum across lanes.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): this writes $src (movw $src,$src clears the upper 32 bits
    // before the 64-bit vector move) without a USE_KILL effect — presumably
    // benign since only undefined upper bits change; confirm.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
 8434 
// Population count of an int loaded from memory: load the 4 bytes straight
// into the vector register (ldrs), then CNT/ADDV as in popCountI.
instruct popCountI_mem(iRegINoSp dst, memory4 mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    // Emit the ldrs through the generic loadStore helper so all memory4
    // addressing modes (base/index/scale/disp) are handled.
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
 8456 
 8457 // Note: Long.bitCount(long) returns an int.
 8458 instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
 8459   predicate(UsePopCountInstruction);
 8460   match(Set dst (PopCountL src));
 8461   effect(TEMP tmp);
 8462   ins_cost(INSN_COST * 13);
 8463 
 8464   format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
 8465             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
 8466             "addv   $tmp, $tmp\t# vector (8B)\n\t"
 8467             "mov    $dst, $tmp\t# vector (1D)" %}
 8468   ins_encode %{
 8469     __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
 8470     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
 8471     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
 8472     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
 8473   %}
 8474 
 8475   ins_pipe(pipe_class_default);
 8476 %}
 8477 
 8478 instruct popCountL_mem(iRegINoSp dst, memory8 mem, vRegD tmp) %{
 8479   predicate(UsePopCountInstruction);
 8480   match(Set dst (PopCountL (LoadL mem)));
 8481   effect(TEMP tmp);
 8482   ins_cost(INSN_COST * 13);
 8483 
 8484   format %{ "ldrd   $tmp, $mem\n\t"
 8485             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
 8486             "addv   $tmp, $tmp\t# vector (8B)\n\t"
 8487             "mov    $dst, $tmp\t# vector (1D)" %}
 8488   ins_encode %{
 8489     FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
 8490     loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
 8491               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 8492     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
 8493     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
 8494     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
 8495   %}
 8496 
 8497   ins_pipe(pipe_class_default);
 8498 %}
 8499 
 8500 // ============================================================================
 8501 // MemBar Instruction
 8502 
// LoadFence: order prior loads before subsequent loads and stores
// (acquire-style barrier).
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarAcquire that the predicate has proven redundant (e.g. the
// preceding load already carries acquire semantics); emits only a
// block comment, no code.
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}

// Full acquire barrier: LoadLoad|LoadStore (printed as "dmb ish" in
// the format for disassembly readability).
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire\n\t"
            "dmb ish" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}


// Acquire side of a lock: always elided here — the lock acquisition
// itself provides the required ordering.
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// StoreFence: order prior loads and stores before subsequent stores
// (release-style barrier).
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarRelease proven redundant by the predicate; no code emitted.
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}

// Full release barrier: LoadStore|StoreStore.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release\n\t"
            "dmb ish" %}

  ins_encode %{
    __ block_comment("membar_release");
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// StoreStore-only barrier (e.g. after object initialization).
instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Release side of a lock: always elided — the unlock provides the
// required ordering.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// MemBarVolatile proven redundant by the predicate; no code emitted.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Full volatile barrier: StoreLoad is the expensive ordering, hence
// the x100 cost to discourage selection when an elided form applies.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile\n\t"
             "dmb ish"%}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
 8650 
 8651 // ============================================================================
 8652 // Cast/Convert Instructions
 8653 
// Reinterpret a long as a pointer: a plain register move, elided
// entirely when source and destination allocate to the same register.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Reinterpret a pointer as a long: same move-or-nothing encoding.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Convert oop into int for vectors alignment masking
// (movw zero-extends, keeping only the low 32 bits of the pointer).
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
 8696 
 8697 // Convert compressed oop into int for vectors alignment masking
 8698 // in case of 32bit oops (heap < 4Gb).
 8699 instruct convN2I(iRegINoSp dst, iRegN src)
 8700 %{
 8701   predicate(CompressedOops::shift() == 0);
 8702   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
 8703 
 8704   ins_cost(INSN_COST);
 8705   format %{ "mov dst, $src\t# compressed ptr -> int" %}
 8706   ins_encode %{
 8707     __ movw($dst$$Register, $src$$Register);
 8708   %}
 8709 
 8710   ins_pipe(ialu_reg);
 8711 %}
 8712 
 8713 
 8714 // Convert oop pointer into compressed form
// Compress a (possibly null) oop; the null check inside
// encode_heap_oop uses comparisons, hence KILL cr.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Compress an oop statically known to be non-null (no null check path).
// NOTE(review): cr is declared as an operand but there is no KILL
// effect — presumably encode_heap_oop_not_null leaves flags intact;
// confirm against the MacroAssembler implementation.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop that may be null (and is not a constant).
// NOTE(review): cr declared without a KILL effect, as above.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop known to be non-null or constant.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
 8767 
 8768 // n.b. AArch64 implementations of encode_klass_not_null and
 8769 // decode_klass_not_null do not modify the flags register so, unlike
 8770 // Intel, we don't kill CR as a side effect here
 8771 
// Compress a klass pointer (never null). Per the note above, the
// AArch64 implementation does not clobber flags, so no KILL cr.
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Decompress a narrow klass pointer (never null); the in-place
// single-register overload is used when dst and src coincide.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
 8805 
// Type-system-only nodes: CheckCastPP/CastPP/CastII change the ideal
// type of a value, not its bits, so they emit no code (size(0)).
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
 8836 
 8837 // ============================================================================
 8838 // Atomic operation instructions
 8839 //
 8840 // Intel and SPARC both implement Ideal Node LoadPLocked and
 8841 // Store{PIL}Conditional instructions using a normal load for the
 8842 // LoadPLocked and a CAS for the Store{PIL}Conditional.
 8843 //
 8844 // The ideal code appears only to use LoadPLocked/StorePLocked as a
 8845 // pair to lock object allocations from Eden space when not using
 8846 // TLABs.
 8847 //
 8848 // There does not appear to be a Load{IL}Locked Ideal Node and the
 8849 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
 8850 // and to use StoreIConditional only for 32-bit and StoreLConditional
 8851 // only for 64-bit.
 8852 //
 8853 // We implement LoadPLocked and StorePLocked instructions using,
 8854 // respectively the AArch64 hw load-exclusive and store-conditional
 8855 // instructions. Whereas we must implement each of
 8856 // Store{IL}Conditional using a CAS which employs a pair of
 8857 // instructions comprising a load-exclusive followed by a
 8858 // store-conditional.
 8859 
 8860 
 8861 // Locked-load (linked load) of the current heap-top
 8862 // used when updating the eden heap top
 8863 // implemented using ldaxr on AArch64
 8864 
// Linked (exclusive) load of a pointer with acquire semantics, paired
// with storePConditional below to form the heap-top update.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
 8877 
 8878 // Conditional-store of the updated heap-top.
 8879 // Used during allocation of the shared heap.
 8880 // Sets flag (EQ) on success.
 8881 // implemented using stlxr on AArch64.
 8882 
// Store-conditional (release) of the new heap top; sets EQ on success.
// NOTE(review): the two format string literals below concatenate with
// no "\n\t" separator, so the debug listing runs them together on one
// line — cosmetic only.
instruct storePConditional(memory8 heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}


// storeLConditional is used by PhaseMacroExpand::expand_lock_node
// when attempting to rebias a lock towards the current thread.  We
// must use the acquire form of cmpxchg in order to guarantee acquire
// semantics in this case.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}

// storeIConditional also has acquire semantics, for no better reason
// than matching storeLConditional.  At the time of writing this
// comment storeIConditional was not used anywhere by AArch64.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
 8942 
 8943 // standard CompareAndSwapX when we are using barriers
 8944 // these have higher priority than the rules selected by a predicate
 8945 
 8946 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
 8947 // can't match them
 8948 
// CAS on a byte; result is 1 on success, 0 on failure (cset EQ).
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS on a short.
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS on an int.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS on a long.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS on a pointer; only when no GC barrier data is attached
// (barrier-aware variants live in the GC-specific ad files).
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  predicate(n->as_LoadStore()->barrier_data() == 0);
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS on a narrow oop.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 9057 
 9058 // alternative CompareAndSwapX when we are eliding barriers
 9059 
// Acquiring variants: selected when needs_acquiring_load_exclusive(n)
// holds, so the CAS itself supplies acquire ordering and the separate
// barrier can be elided — hence the lower cost than the plain forms.
instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring CAS on a short.
instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring CAS on an int.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring CAS on a long.
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring CAS on a pointer; only when no GC barrier data is attached.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring CAS on a narrow oop.
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 9173 
 9174 
 9175 // ---------------------------------------------------------------------
 9176 
 9177 
 9178 // BEGIN This section of the file is automatically generated. Do not edit --------------
 9179 
 9180 // Sundry CAS operations.  Note that release is always true,
 9181 // regardless of the memory ordering of the CAS.  This is because we
 9182 // need the volatile case to be sequentially consistent but there is
 9183 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
 9184 // can't check the type of memory ordering here, so we always emit a
 9185 // STLXR.
 9186 
 9187 // This section is generated from aarch64_ad_cas.m4
 9188 
 9189 
 9190 
// Strong CAE on a byte; returns the value previously in memory.
// Sub-word results are sign-extended (sxtbw) because the exclusive
// load zero-extends but Java bytes are signed.
// NOTE(review): the format text says "weak" although the cmpxchg call
// passes /*weak*/ false (a strong CAS) — appears to be a template
// leftover; any fix belongs in aarch64_ad_cas.m4, not here.
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Strong CAE on a short; result sign-extended via sxthw.
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Strong CAE on an int.
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Strong CAE on a long.
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Strong CAE on a narrow oop.
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Strong CAE on a pointer; only when no GC barrier data is attached.
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 9283 
// Acquiring CAE variants: same as above but /*acquire*/ true, selected
// when needs_acquiring_load_exclusive(n) holds, with a lower cost.
// NOTE(review): as with the plain forms, the "weak" in the format text
// contradicts /*weak*/ false below — fix in aarch64_ad_cas.m4.
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Acquiring CAE on a short; result sign-extended via sxthw.
instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}


// Acquiring CAE on an int.
instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Acquiring CAE on a long.
instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}


// Acquiring CAE on a narrow oop.
instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Acquiring CAE on a pointer; only when no GC barrier data is attached.
instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 9383 
// Weak compare-and-swap rules (no acquire on the exclusive load). A weak
// CAS node only needs to report success, so every encoding passes noreg
// (the witnessed value is not captured) and then materializes the 0/1
// result with csetw on the EQ condition.

// byte
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// short
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// int
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// long (result is still an int success flag)
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// narrow oop
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// ptr; only matches when the node carries no GC barrier data
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9486 
// Acquiring weak compare-and-swap rules, selected when
// needs_acquiring_load_exclusive(n) holds. Same shape as the plain weak
// rules but with /*acquire*/ true on the exclusive load and a lower
// ins_cost so the matcher prefers them when the predicate applies.

// byte, acquiring
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// short, acquiring
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// int, acquiring
instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// long, acquiring
instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// narrow oop, acquiring
instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 9576 
 9577 instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
 9578   match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
 9579   predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
 9580   ins_cost(VOLATILE_REF_COST);
 9581   effect(KILL cr);
 9582   format %{
 9583     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
 9584     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 9585   %}
 9586   ins_encode %{
 9587     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 9588                Assembler::xword, /*acquire*/ true, /*release*/ true,
 9589                /*weak*/ true, noreg);
 9590     __ csetw($res$$Register, Assembler::EQ);
 9591   %}
 9592   ins_pipe(pipe_slow);
 9593 %}
 9594 
 9595 // END This section of the file is automatically generated. Do not edit --------------
 9596 // ---------------------------------------------------------------------
 9597 
// GetAndSet (atomic exchange) rules: unconditionally store $newv and
// return the previous memory contents in $prev.

// int
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// narrow oop
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// ptr; only matches when the node carries no GC barrier data
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9638 
// Acquiring GetAndSet rules, selected by needs_acquiring_load_exclusive(n):
// same as above but emit the acquiring exchange forms
// (atomic_xchgal / atomic_xchgalw) at a lower ins_cost.

// int, acquiring
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long, acquiring
instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// narrow oop, acquiring
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// ptr, acquiring; only when the node carries no GC barrier data
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9682 
 9683 
// GetAndAdd rules. Four variants per size: register or immediate
// increment, each with a *_no_res form that matches when the node's
// result is unused (result_not_used()) and passes noreg so no result
// register is tied up. The *i forms require an add/sub-encodable
// immediate (immLAddSub / immIAddSub).

// long, register increment
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long, register increment, result unused
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long, immediate increment
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long, immediate increment, result unused
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int, register increment
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int, register increment, result unused
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int, immediate increment
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int, immediate increment, result unused
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9767 
// Acquiring GetAndAdd rules, selected by needs_acquiring_load_exclusive(n):
// mirror the rules above but emit the acquiring add forms
// (atomic_addal / atomic_addalw) at a lower ins_cost.

// long, register increment, acquiring
instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long, register increment, result unused, acquiring
instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long, immediate increment, acquiring
instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long, immediate increment, result unused, acquiring
instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int, register increment, acquiring
instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int, register increment, result unused, acquiring
instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int, immediate increment, acquiring
instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int, immediate increment, result unused, acquiring
instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9855 
 9856 // Manifest a CmpL result in an integer register.
 9857 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
 9858 instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
 9859 %{
 9860   match(Set dst (CmpL3 src1 src2));
 9861   effect(KILL flags);
 9862 
 9863   ins_cost(INSN_COST * 6);
 9864   format %{
 9865       "cmp $src1, $src2"
 9866       "csetw $dst, ne"
 9867       "cnegw $dst, lt"
 9868   %}
 9869   // format %{ "CmpL3 $dst, $src1, $src2" %}
 9870   ins_encode %{
 9871     __ cmp($src1$$Register, $src2$$Register);
 9872     __ csetw($dst$$Register, Assembler::NE);
 9873     __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
 9874   %}
 9875 
 9876   ins_pipe(pipe_class_default);
 9877 %}
 9878 
 9879 instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
 9880 %{
 9881   match(Set dst (CmpL3 src1 src2));
 9882   effect(KILL flags);
 9883 
 9884   ins_cost(INSN_COST * 6);
 9885   format %{
 9886       "cmp $src1, $src2"
 9887       "csetw $dst, ne"
 9888       "cnegw $dst, lt"
 9889   %}
 9890   ins_encode %{
 9891     int32_t con = (int32_t)$src2$$constant;
 9892      if (con < 0) {
 9893       __ adds(zr, $src1$$Register, -con);
 9894     } else {
 9895       __ subs(zr, $src1$$Register, con);
 9896     }
 9897     __ csetw($dst$$Register, Assembler::NE);
 9898     __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
 9899   %}
 9900 
 9901   ins_pipe(pipe_class_default);
 9902 %}
 9903 
 9904 // ============================================================================
 9905 // Conditional Move Instructions
 9906 
// n.b. we have identical rules for both a signed compare op (cmpOp)
// and an unsigned compare op (cmpOpU). it would be nice if we could
// define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
 9916 
// Int conditional move, both sources in registers. Note the operand
// order in the encoding: csel picks $src2 when $cmp holds, else $src1.

// signed flags variant
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// unsigned flags variant
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9948 
 9949 // special cases where one arg is zero
 9950 
 9951 // n.b. this is selected in preference to the rule above because it
 9952 // avoids loading constant 0 into a source register
 9953 
 9954 // TODO
 9955 // we ought only to be able to cull one of these variants as the ideal
 9956 // transforms ought always to order the zero consistently (to left/right?)
 9957 
// Int conditional move where one operand is the constant zero: use zr
// directly so no register needs to be loaded with 0.

// zero on the left (selected when $cmp fails)
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// zero on the left, unsigned flags
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// zero on the right (selected when $cmp holds)
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// zero on the right, unsigned flags
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10021 
10022 // special case for creating a boolean 0 or 1
10023 
10024 // n.b. this is selected in preference to the rule above because it
10025 // avoids loading constants 0 and 1 into a source register
10026 
// Boolean materialization (0 or 1) via csincw zr, zr: dst = cmp ? 0 : 1,
// i.e. zr incremented when the condition fails. No source registers
// need to hold the constants.

// signed flags variant
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// unsigned flags variant
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
10064 
// Long conditional move, both sources in registers: csel picks $src2
// when $cmp holds, else $src1.

// signed flags variant
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// unsigned flags variant
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10096 
10097 // special cases where one arg is zero
10098 
// Long conditional move where one operand is the constant zero: use zr
// directly so no register needs to be loaded with 0.

// zero on the right (selected when $cmp holds)
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// zero on the right, unsigned flags
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// zero on the left (selected when $cmp fails)
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// zero on the left, unsigned flags
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10162 
// Conditional move, pointer: dst = $cmp ? src2 : src1 (csel).
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare variant of cmovP_reg_reg.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// Conditional move, pointer: dst = $cmp ? null : src.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare variant of cmovP_reg_zero.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Conditional move, pointer: dst = $cmp ? src : null.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare variant of cmovP_zero_reg.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10260 
// Conditional move, narrow (compressed) oop: dst = $cmp ? src2 : src1,
// using the 32-bit cselw since compressed oops are 32 bits wide.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10276 
// Conditional move, narrow (compressed) oop, unsigned compare:
// dst = $cmp ? src2 : src1, via the 32-bit cselw.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  // This is the cmpOpU/rFlagsRegU pattern, so the disassembly annotation
  // must say "unsigned" (it previously said "signed" — copy/paste slip).
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10292 
// special cases where one arg is zero

// Conditional move, compressed oop: dst = $cmp ? 0 : src.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare variant of cmovN_reg_zero.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Conditional move, compressed oop: dst = $cmp ? src : 0.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare variant of cmovN_zero_reg.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10358 
// Conditional move, float: dst = $cmp ? src2 : src1, via fcsel (single precision).
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Format now lists the operands in the order the encoding actually emits
  // ($src2 before $src1), matching the integer csel patterns above; the old
  // string printed "$src1, $src2" and misrepresented the generated code.
  format %{ "fcsels $dst, $src2, $src1, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10376 
// Conditional move, float, unsigned compare: dst = $cmp ? src2 : src1.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Format now lists the operands in the order the encoding actually emits
  // ($src2 before $src1), matching the integer csel patterns above.
  format %{ "fcsels $dst, $src2, $src1, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10394 
// Conditional move, double: dst = $cmp ? src2 : src1, via fcsel (double precision).
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Fixed format string: annotation said "cmove float" for this double
  // pattern, and the operand order now matches the encoding ($src2, $src1).
  format %{ "fcseld $dst, $src2, $src1, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10412 
// Conditional move, double, unsigned compare: dst = $cmp ? src2 : src1.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Fixed format string: annotation said "cmove float" for this double
  // pattern, and the operand order now matches the encoding ($src2, $src1).
  format %{ "fcseld $dst, $src2, $src1, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10430 
10431 // ============================================================================
10432 // Arithmetic Instructions
10433 //
10434 
10435 // Integer Addition
10436 
10437 // TODO
10438 // these currently employ operations which do not set CR and hence are
10439 // not flagged as killing CR but we would like to isolate the cases
10440 // where we want to set flags from those where we don't. need to work
10441 // out how to do that.
10442 
// 32-bit integer add, register-register: dst = src1 + src2 (addw).
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit integer add with an add/sub-encodable immediate.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// As above, but the 32-bit source is the low word of a long (ConvL2I folded in).
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10485 
// Pointer Addition
// dst = src1 + src2 where src1 is a pointer and src2 a long offset.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer plus sign-extended 32-bit offset, folding the ConvI2L into the
// add's sxtw extend mode (single instruction).
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer plus scaled long index, folding the shift into the address mode.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Pointer plus scaled, sign-extended 32-bit index (ConvI2L + shift folded
// into the sxtw address mode).
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// (long)(int)src << scale, done in one sbfiz: the bitfield insert both
// sign-extends the 32-bit source and shifts it into place. The width is
// clamped to 32 (MIN2) so no more than the 32 source bits are inserted.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN2(32, (int)((-$scale$$constant) & 63)));
  %}

  ins_pipe(ialu_reg_shift);
%}
10561 
// Pointer Immediate Addition
// n.b. this needs to be more expensive than using an indirect memory
// operand
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}

// Long Addition
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long Immediate Addition. No constant pool entries required.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10610 
// Integer Subtraction
// 32-bit subtract, register-register: dst = src1 - src2 (subw).
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// Long Subtraction
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10658 
// Long Immediate Subtraction. No constant pool entries required.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // Fixed: missing space between the mnemonic and $dst in the format string
  // ("sub$dst" rendered the mnemonic and register fused in listings).
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10673 
// Integer Negation (special case for sub)

// dst = -src (32 bits), matched from SubI with a zero left operand.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long Negation

// dst = -src (64 bits), matched from SubL with a zero left operand.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10705 
// Integer Multiply

// 32-bit multiply: dst = src1 * src2 (mulw).
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// Widening signed multiply: 64-bit product of two 32-bit sources, matched
// from MulL over two ConvI2L nodes so a single smull suffices.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// Long Multiply

// 64-bit multiply: dst = src1 * src2 (low 64 bits of the product).
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}

// High 64 bits of the signed 128-bit product of src1 and src2 (smulh).
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2, \t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10770 
// Combined Integer Multiply & Add/Sub

// dst = src3 + src1 * src2, fused into a single maddw.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// dst = src3 - src1 * src2, fused into a single msubw.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// Combined Integer Multiply & Neg

// dst = -(src1 * src2); both commuted match rules map to one mnegw.
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));
  match(Set dst (MulI src1 (SubI zero src2)));

  ins_cost(INSN_COST * 3);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10822 
// Combined Long Multiply & Add/Sub

// dst = src3 + src1 * src2 (64-bit), fused into a single madd.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// dst = src3 - src1 * src2 (64-bit), fused into a single msub.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// Combined Long Multiply & Neg

// dst = -(src1 * src2) (64-bit); both commuted match rules map to one mneg.
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));
  match(Set dst (MulL src1 (SubL zero src2)));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10874 
// Combined Integer Signed Multiply & Add/Sub/Neg Long

// dst = src3 + (long)src1 * (long)src2, fused into one smaddl.
instruct smaddL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (AddL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smaddl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smaddl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// dst = src3 - (long)src1 * (long)src2, fused into one smsubl.
instruct smsubL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (SubL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smsubl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smsubl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// dst = -((long)src1 * (long)src2); both commuted match rules map to smnegl.
instruct smnegL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero (ConvI2L src1)) (ConvI2L src2)));
  match(Set dst (MulL (ConvI2L src1) (SubL zero (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smnegl  $dst, $src1, $src2" %}

  ins_encode %{
    __ smnegl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10924 
// Combined Multiply-Add Shorts into Integer (dst = src1 * src2 + src3 * src4)

// Two-instruction sequence: the first product goes to rscratch1, then a
// maddw folds in the second product. rscratch1 is a scratch register, so
// no extra temp operand is declared.
instruct muladdS2I(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3, iRegIorL2I src4) %{
  match(Set dst (MulAddS2I (Binary src1 src2) (Binary src3 src4)));

  ins_cost(INSN_COST * 5);
  format %{ "mulw  rscratch1, $src1, $src2\n\t"
            "maddw $dst, $src3, $src4, rscratch1" %}

  ins_encode %{
    __ mulw(rscratch1, as_Register($src1$$reg), as_Register($src2$$reg));
    __ maddw(as_Register($dst$$reg), as_Register($src3$$reg), as_Register($src4$$reg), rscratch1); %}

  ins_pipe(imac_reg_reg);
%}
10940 
// Integer Divide

// 32-bit signed divide via the aarch64_enc_divw encoding (sdivw).
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}

// Long Divide

// 64-bit signed divide via the aarch64_enc_div encoding (sdiv).
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10964 
// Integer Remainder

// dst = src1 % src2, computed as src1 - (src1 / src2) * src2: sdivw puts
// the quotient in rscratch1, msubw folds it back (via aarch64_enc_modw).
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  // Fixed: stray '(' after "msubw" (and no closing paren) garbled the
  // debug/PrintAssembly listing for this pattern.
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10977 
// Long Remainder

// dst = src1 % src2 (64-bit), as src1 - (src1 / src2) * src2: sdiv puts
// the quotient in rscratch1, msub folds it back (via aarch64_enc_mod).
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Fixed: stray '(' after "msub" (no closing paren) and missing "\t" after
  // the newline; now consistent with the modI format above.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10990 
// Integer Shifts
// Register-count variants use the AArch64 variable-shift instructions
// (lslvw/lsrvw/asrvw); immediate variants mask the constant to 5 bits
// ($src2 & 0x1f) so only the low bits of the shift count are used.

// Shift Left Register
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11088 
11089 // Combined Int Mask and Right Shift (using UBFM)
11090 // TODO
11091 
11092 // Long Shifts
11093 
11094 // Shift Left Register
11095 instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
11096   match(Set dst (LShiftL src1 src2));
11097 
11098   ins_cost(INSN_COST * 2);
11099   format %{ "lslv  $dst, $src1, $src2" %}
11100 
11101   ins_encode %{
11102     __ lslv(as_Register($dst$$reg),
11103             as_Register($src1$$reg),
11104             as_Register($src2$$reg));
11105   %}
11106 
11107   ins_pipe(ialu_reg_reg_vshift);
11108 %}
11109 
11110 // Shift Left Immediate
11111 instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
11112   match(Set dst (LShiftL src1 src2));
11113 
11114   ins_cost(INSN_COST);
11115   format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}
11116 
11117   ins_encode %{
11118     __ lsl(as_Register($dst$$reg),
11119             as_Register($src1$$reg),
11120             $src2$$constant & 0x3f);
11121   %}
11122 
11123   ins_pipe(ialu_reg_shift);
11124 %}
11125 
11126 // Shift Right Logical Register
11127 instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
11128   match(Set dst (URShiftL src1 src2));
11129 
11130   ins_cost(INSN_COST * 2);
11131   format %{ "lsrv  $dst, $src1, $src2" %}
11132 
11133   ins_encode %{
11134     __ lsrv(as_Register($dst$$reg),
11135             as_Register($src1$$reg),
11136             as_Register($src2$$reg));
11137   %}
11138 
11139   ins_pipe(ialu_reg_reg_vshift);
11140 %}
11141 
11142 // Shift Right Logical Immediate
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  // Long unsigned (logical) shift-right by a constant, count masked to
  // 0..63 per Java long-shift semantics.
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11157 
11158 // A special-case pattern for card table stores.
// A special-case pattern for card table stores.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  // Logical shift-right of a pointer reinterpreted as a long (CastP2X).
  // This folds the pointer-to-integer cast into the shift itself, so no
  // separate move is emitted when computing a card-table index from an
  // oop address.
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11173 
11174 // Shift Right Arithmetic Register
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  // Long arithmetic (sign-propagating) shift-right by a register-held
  // amount.  ASRV uses only the low six bits of src2, matching Java ">>".
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11189 
11190 // Shift Right Arithmetic Immediate
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  // Long arithmetic shift-right by a constant, count masked to 0..63
  // per Java long-shift semantics.
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11205 
11206 // BEGIN This section of the file is automatically generated. Do not edit --------------
11207 
11208 
11209 // This pattern is automatically generated from aarch64_ad.m4
11210 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  // dst = ~src1 (long): "src1 ^ -1" folds to a single EON with zr.
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
11227 
11228 // This pattern is automatically generated from aarch64_ad.m4
11229 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  // dst = ~src1 (int): "src1 ^ -1" folds to a single EONW with zr.
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
11246 
11247 // This pattern is automatically generated from aarch64_ad.m4
11248 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  // dst = src1 & ~src2 (int): AND with an inverted operand folds to BICW.
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11265 
11266 // This pattern is automatically generated from aarch64_ad.m4
11267 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  // dst = src1 & ~src2 (long): AND with an inverted operand folds to BIC.
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11284 
11285 // This pattern is automatically generated from aarch64_ad.m4
11286 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  // dst = src1 | ~src2 (int): OR with an inverted operand folds to ORNW.
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11303 
11304 // This pattern is automatically generated from aarch64_ad.m4
11305 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  // dst = src1 | ~src2 (long): OR with an inverted operand folds to ORN.
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11322 
11323 // This pattern is automatically generated from aarch64_ad.m4
11324 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  // dst = ~(src1 ^ src2) (int): "-1 ^ (src2 ^ src1)" folds to EONW.
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11341 
11342 // This pattern is automatically generated from aarch64_ad.m4
11343 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  // dst = ~(src1 ^ src2) (long): "-1 ^ (src2 ^ src1)" folds to EON.
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11360 
11361 // This pattern is automatically generated from aarch64_ad.m4
11362 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  // dst = src1 & ~(src2 >>> src3) (int): one BICW with an LSR-shifted
  // operand; shift count masked to 0..31 per Java int-shift semantics.
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11380 
11381 // This pattern is automatically generated from aarch64_ad.m4
11382 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  // dst = src1 & ~(src2 >>> src3) (long): one BIC with an LSR-shifted
  // operand; shift count masked to 0..63.
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11400 
11401 // This pattern is automatically generated from aarch64_ad.m4
11402 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  // dst = src1 & ~(src2 >> src3) (int): one BICW with an ASR-shifted
  // operand; shift count masked to 0..31.
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11420 
11421 // This pattern is automatically generated from aarch64_ad.m4
11422 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  // dst = src1 & ~(src2 >> src3) (long): one BIC with an ASR-shifted
  // operand; shift count masked to 0..63.
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11440 
11441 // This pattern is automatically generated from aarch64_ad.m4
11442 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  // dst = src1 & ~(src2 << src3) (int): one BICW with an LSL-shifted
  // operand; shift count masked to 0..31.
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11460 
11461 // This pattern is automatically generated from aarch64_ad.m4
11462 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  // dst = src1 & ~(src2 << src3) (long): one BIC with an LSL-shifted
  // operand; shift count masked to 0..63.
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11480 
11481 // This pattern is automatically generated from aarch64_ad.m4
11482 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  // dst = ~(src1 ^ (src2 >>> src3)) (int): one EONW with an LSR-shifted
  // operand; shift count masked to 0..31.
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11500 
11501 // This pattern is automatically generated from aarch64_ad.m4
11502 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  // dst = ~(src1 ^ (src2 >>> src3)) (long): one EON with an LSR-shifted
  // operand; shift count masked to 0..63.
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11520 
11521 // This pattern is automatically generated from aarch64_ad.m4
11522 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  // dst = ~(src1 ^ (src2 >> src3)) (int): one EONW with an ASR-shifted
  // operand; shift count masked to 0..31.
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11540 
11541 // This pattern is automatically generated from aarch64_ad.m4
11542 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  // dst = ~(src1 ^ (src2 >> src3)) (long): one EON with an ASR-shifted
  // operand; shift count masked to 0..63.
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11560 
11561 // This pattern is automatically generated from aarch64_ad.m4
11562 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  // dst = ~(src1 ^ (src2 << src3)) (int): one EONW with an LSL-shifted
  // operand; shift count masked to 0..31.
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11580 
11581 // This pattern is automatically generated from aarch64_ad.m4
11582 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  // dst = ~(src1 ^ (src2 << src3)) (long): one EON with an LSL-shifted
  // operand; shift count masked to 0..63.
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11600 
11601 // This pattern is automatically generated from aarch64_ad.m4
11602 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  // dst = src1 | ~(src2 >>> src3) (int): one ORNW with an LSR-shifted
  // operand; shift count masked to 0..31.
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11620 
11621 // This pattern is automatically generated from aarch64_ad.m4
11622 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  // dst = src1 | ~(src2 >>> src3) (long): one ORN with an LSR-shifted
  // operand; shift count masked to 0..63.
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11640 
11641 // This pattern is automatically generated from aarch64_ad.m4
11642 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  // dst = src1 | ~(src2 >> src3) (int): one ORNW with an ASR-shifted
  // operand; shift count masked to 0..31.
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11660 
11661 // This pattern is automatically generated from aarch64_ad.m4
11662 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  // dst = src1 | ~(src2 >> src3) (long): one ORN with an ASR-shifted
  // operand; shift count masked to 0..63.
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11680 
11681 // This pattern is automatically generated from aarch64_ad.m4
11682 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  // dst = src1 | ~(src2 << src3) (int): one ORNW with an LSL-shifted
  // operand; shift count masked to 0..31.
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11700 
11701 // This pattern is automatically generated from aarch64_ad.m4
11702 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  // dst = src1 | ~(src2 << src3) (long): one ORN with an LSL-shifted
  // operand; shift count masked to 0..63.
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11720 
11721 // This pattern is automatically generated from aarch64_ad.m4
11722 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 & (src2 >>> src3) (int): the constant shift is folded into
  // ANDW's shifted-register operand; count masked to 0..31.
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11741 
11742 // This pattern is automatically generated from aarch64_ad.m4
11743 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 & (src2 >>> src3) (long): shift folded into AND's
  // shifted-register operand; count masked to 0..63.
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11762 
11763 // This pattern is automatically generated from aarch64_ad.m4
11764 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 & (src2 >> src3) (int): shift folded into ANDW's
  // ASR-shifted operand; count masked to 0..31.
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11783 
11784 // This pattern is automatically generated from aarch64_ad.m4
11785 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 & (src2 >> src3) (long): shift folded into AND's
  // ASR-shifted operand; count masked to 0..63.
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11804 
11805 // This pattern is automatically generated from aarch64_ad.m4
11806 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 & (src2 << src3) (int): shift folded into ANDW's
  // LSL-shifted operand; count masked to 0..31.
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11825 
11826 // This pattern is automatically generated from aarch64_ad.m4
11827 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 & (src2 << src3) (long): shift folded into AND's
  // LSL-shifted operand; count masked to 0..63.
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11846 
11847 // This pattern is automatically generated from aarch64_ad.m4
11848 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 ^ (src2 >>> src3) (int): shift folded into EORW's
  // LSR-shifted operand; count masked to 0..31.
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11867 
11868 // This pattern is automatically generated from aarch64_ad.m4
11869 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 ^ (src2 >>> src3) (long): shift folded into EOR's
  // LSR-shifted operand; count masked to 0..63.
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11888 
11889 // This pattern is automatically generated from aarch64_ad.m4
11890 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 ^ (src2 >> src3) (int): shift folded into EORW's
  // ASR-shifted operand; count masked to 0..31.
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11909 
11910 // This pattern is automatically generated from aarch64_ad.m4
11911 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 ^ (src2 >> src3) (long): shift folded into EOR's
  // ASR-shifted operand; count masked to 0..63.
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11930 
11931 // This pattern is automatically generated from aarch64_ad.m4
11932 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 ^ (src2 << src3) (int): shift folded into EORW's
  // LSL-shifted operand; count masked to 0..31.
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11951 
11952 // This pattern is automatically generated from aarch64_ad.m4
11953 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 ^ (src2 << src3) (long): shift folded into EOR's
  // LSL-shifted operand; count masked to 0..63.
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11972 
11973 // This pattern is automatically generated from aarch64_ad.m4
11974 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 | (src2 >>> src3) (int): shift folded into ORRW's
  // LSR-shifted operand; count masked to 0..31.
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11993 
11994 // This pattern is automatically generated from aarch64_ad.m4
11995 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  // dst = src1 | (src2 >>> src3) (long): shift folded into ORR's
  // LSR-shifted operand; count masked to 0..63.
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12014 
12015 // This pattern is automatically generated from aarch64_ad.m4
12016 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12017 instruct OrI_reg_RShift_reg(iRegINoSp dst,
12018                          iRegIorL2I src1, iRegIorL2I src2,
12019                          immI src3, rFlagsReg cr) %{
12020   match(Set dst (OrI src1 (RShiftI src2 src3)));
12021 
12022   ins_cost(1.9 * INSN_COST);
12023   format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}
12024 
12025   ins_encode %{
12026     __ orrw(as_Register($dst$$reg),
12027               as_Register($src1$$reg),
12028               as_Register($src2$$reg),
12029               Assembler::ASR,
12030               $src3$$constant & 0x1f);
12031   %}
12032 
12033   ins_pipe(ialu_reg_reg_shift);
12034 %}
12035 
12036 // This pattern is automatically generated from aarch64_ad.m4
12037 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12038 instruct OrL_reg_RShift_reg(iRegLNoSp dst,
12039                          iRegL src1, iRegL src2,
12040                          immI src3, rFlagsReg cr) %{
12041   match(Set dst (OrL src1 (RShiftL src2 src3)));
12042 
12043   ins_cost(1.9 * INSN_COST);
12044   format %{ "orr  $dst, $src1, $src2, ASR $src3" %}
12045 
12046   ins_encode %{
12047     __ orr(as_Register($dst$$reg),
12048               as_Register($src1$$reg),
12049               as_Register($src2$$reg),
12050               Assembler::ASR,
12051               $src3$$constant & 0x3f);
12052   %}
12053 
12054   ins_pipe(ialu_reg_reg_shift);
12055 %}
12056 
12057 // This pattern is automatically generated from aarch64_ad.m4
12058 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12059 instruct OrI_reg_LShift_reg(iRegINoSp dst,
12060                          iRegIorL2I src1, iRegIorL2I src2,
12061                          immI src3, rFlagsReg cr) %{
12062   match(Set dst (OrI src1 (LShiftI src2 src3)));
12063 
12064   ins_cost(1.9 * INSN_COST);
12065   format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}
12066 
12067   ins_encode %{
12068     __ orrw(as_Register($dst$$reg),
12069               as_Register($src1$$reg),
12070               as_Register($src2$$reg),
12071               Assembler::LSL,
12072               $src3$$constant & 0x1f);
12073   %}
12074 
12075   ins_pipe(ialu_reg_reg_shift);
12076 %}
12077 
12078 // This pattern is automatically generated from aarch64_ad.m4
12079 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12080 instruct OrL_reg_LShift_reg(iRegLNoSp dst,
12081                          iRegL src1, iRegL src2,
12082                          immI src3, rFlagsReg cr) %{
12083   match(Set dst (OrL src1 (LShiftL src2 src3)));
12084 
12085   ins_cost(1.9 * INSN_COST);
12086   format %{ "orr  $dst, $src1, $src2, LSL $src3" %}
12087 
12088   ins_encode %{
12089     __ orr(as_Register($dst$$reg),
12090               as_Register($src1$$reg),
12091               as_Register($src2$$reg),
12092               Assembler::LSL,
12093               $src3$$constant & 0x3f);
12094   %}
12095 
12096   ins_pipe(ialu_reg_reg_shift);
12097 %}
12098 
// Addition where the second operand is shifted by an immediate count.
// AArch64 add (shifted register) folds the shift into the add/addw, saving
// an instruction over shifting into a temporary first.  The count mask
// (0x1f / 0x3f) mirrors the hardware's modulo-word-size shift semantics.

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12224 
// Subtraction where the subtrahend is shifted by an immediate count.
// AArch64 sub (shifted register) folds the shift into the sub/subw.
// Note the shifted operand is always the second input (src1 - (src2 shift)).

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12350 
12351  
// Left shift followed by a right shift of the same value collapses to one
// signed (sbfm/sbfmw) or unsigned (ubfm/ubfmw) bitfield move.  In each
// encode block, r and s are the bitfield instruction's immr/imms fields:
// r = (rshift - lshift) mod word size selects the rotation, and
// s = (word size - 1) - lshift marks the highest bit of the source field.

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12443 
// Bitfield extract with shift & mask
//
// (x >>> rshift) & mask, where mask is 2^width - 1 (guaranteed by
// imm*_bitmask), is a single ubfx/ubfxw.  Each predicate checks
// rshift + width <= word size so the requested field actually fits in
// the source word; otherwise the pattern must not match.

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));
  // Make sure we are not going to exceed what ubfx can do.
  predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12510 
// Unsigned bitfield insert-in-zero (ubfiz/ubfizw): (x & mask) << lshift,
// where mask is 2^width - 1, is a single instruction.  The variants below
// also match the same idiom with an I2L or L2I conversion interposed.
// The predicates bound lshift + width so the inserted field fits in the
// destination word.

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
instruct ubfizwIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (LShiftI (AndI src mask) lshift)));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
instruct ubfizLConvL2I(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
%{
  match(Set dst (ConvL2I (LShiftL (AndL src mask) lshift)));
  predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// If there is a convert I to L block between an AndI and a LShiftL, we can also match ubfiz
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// If there is a convert L to I block between an AndL and a LShiftI, we can also match ubfiz
instruct ubfizLConvL2Ix(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
%{
  match(Set dst (LShiftI (ConvL2I (AndL src mask)) lshift));
  predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 31)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Can skip int2long conversions after AND with small bitmask
instruct ubfizIConvI2LAndI(iRegLNoSp dst, iRegI src, immI_bitmask msk)
%{
  match(Set dst (ConvI2L (AndI src msk)));
  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, 0, exact_log2($msk + 1) " %}
  ins_encode %{
    __ ubfiz(as_Register($dst$$reg), as_Register($src$$reg), 0, exact_log2($msk$$constant + 1));
  %}
  ins_pipe(ialu_reg_shift);
%}
12656 
12657 
// Rotations
//
// A rotate-by-constant written as (x << lshift) | (x >>> rshift) — or the
// same with + instead of |, which is equivalent when the shifted fields do
// not overlap — maps to a single extr/extrw with both source registers set
// to the same value by the matcher.  Each predicate requires
// lshift + rshift == word size (mod word size), i.e. a genuine rotation.

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}


// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12729 
12730 
12731 // This pattern is automatically generated from aarch64_ad.m4
12732 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12733 
12734 // rol expander
12735 instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
12736 %{
12737   effect(DEF dst, USE src, USE shift);
12738 
12739   format %{ "rol    $dst, $src, $shift" %}
12740   ins_cost(INSN_COST * 3);
12741   ins_encode %{
12742     __ subw(rscratch1, zr, as_Register($shift$$reg));
12743     __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
12744             rscratch1);
12745     %}
12746   ins_pipe(ialu_reg_reg_vshift);
12747 %}
12748 
12749 // This pattern is automatically generated from aarch64_ad.m4
12750 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12751 
12752 // rol expander
12753 instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
12754 %{
12755   effect(DEF dst, USE src, USE shift);
12756 
12757   format %{ "rol    $dst, $src, $shift" %}
12758   ins_cost(INSN_COST * 3);
12759   ins_encode %{
12760     __ subw(rscratch1, zr, as_Register($shift$$reg));
12761     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
12762             rscratch1);
12763     %}
12764   ins_pipe(ialu_reg_reg_vshift);
12765 %}
12766 
12767 // This pattern is automatically generated from aarch64_ad.m4
12768 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12769 instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
12770 %{
12771   match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));
12772 
12773   expand %{
12774     rolL_rReg(dst, src, shift, cr);
12775   %}
12776 %}
12777 
12778 // This pattern is automatically generated from aarch64_ad.m4
12779 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12780 instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
12781 %{
12782   match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));
12783 
12784   expand %{
12785     rolL_rReg(dst, src, shift, cr);
12786   %}
12787 %}
12788 
12789 // This pattern is automatically generated from aarch64_ad.m4
12790 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12791 instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
12792 %{
12793   match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));
12794 
12795   expand %{
12796     rolI_rReg(dst, src, shift, cr);
12797   %}
12798 %}
12799 
12800 // This pattern is automatically generated from aarch64_ad.m4
12801 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12802 instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
12803 %{
12804   match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));
12805 
12806   expand %{
12807     rolI_rReg(dst, src, shift, cr);
12808   %}
12809 %}
12810 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// ror expander: rotate a 64-bit register right by a register amount (RORV).
// Has no match rule -- it is reached only via expand from the
// rorX_rReg_Var_* patterns below.
// NOTE(review): cr is an operand but is not listed in effect(); presumably
// kept for symmetry with the rol expanders -- confirm against upstream.
instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// ror expander: 32-bit variant of the above, encoded with RORVW.
instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12844 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate-right long by a variable amount:
// (src >>> shift) | (src << (64 - shift)) -> RORV via the rorL_rReg expander.
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// As above with 0 in place of 64: (0 - shift) == (64 - shift) mod 64.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate-right int by a variable amount:
// (src >>> shift) | (src << (32 - shift)) -> RORVW via the rorI_rReg expander.
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// As above with 0 in place of 32: (0 - shift) == (32 - shift) mod 32.
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12888 
12889 
// Add/subtract (extended)
//
// These patterns fold a sign/zero extension of the second operand into
// an AArch64 add/sub with an extended-register operand, saving the
// separate extend instruction.

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long + (long)int: the ConvI2L becomes the sxtw extension of src2.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long - (long)int: the ConvI2L becomes the sxtw extension of src2.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12921 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 16) >> 16 is a sign-extend-halfword; fold it into the add.
// NOTE(review): these three int patterns emit 64-bit `add` for a 32-bit
// AddI while the *_and/_shift int patterns below use `addw`; the upper
// 32 bits of an int register are presumably don't-care here -- confirm
// against upstream aarch64_ad.m4 before relying on the high word.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 24) >> 24 (arithmetic) is a sign-extend-byte.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 24) >>> 24 (logical) is a zero-extend-byte.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12966 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit variants: (src2 << k) >> k selects the low 64-k bits with sign
// extension; the shift pair is folded into an extended-register add.
// (src2 << 48) >> 48 == sign-extend-halfword.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 32) >> 32 == sign-extend-word.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 56) >> 56 (arithmetic) == sign-extend-byte.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 << 56) >>> 56 (logical) == zero-extend-byte.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
13026 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Mask forms: (src2 & 0xFF) is a zero-extend-byte, folded into addw.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 & 0xFFFF) is a zero-extend-halfword.
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit mask forms: (src2 & 0xFFL) == zero-extend-byte.
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 & 0xFFFFL) == zero-extend-halfword.
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 & 0xFFFFFFFFL) == zero-extend-word.
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13101 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterparts of the mask forms above: (src2 & 0xFF) folded
// into subw as a zero-extend-byte operand.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 & 0xFFFF) == zero-extend-halfword.
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit: (src2 & 0xFFL) == zero-extend-byte.
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 & 0xFFFFL) == zero-extend-halfword.
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 & 0xFFFFFFFFL) == zero-extend-word.
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
13176 
13177 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Extend-then-shift forms: sign-extend src2 (via the lshift1/rshift1
// pair) and shift the result left by lshift2, all folded into one
// extended-register add.  NOTE(review): immIExt presumably limits
// lshift2 to the 0..4 range of the extended-operand encoding -- the
// operand is defined elsewhere in this file; confirm there.
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// sxth (48-bit shift pair) then left shift by lshift2.
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// sxtw (32-bit shift pair) then left shift by lshift2.
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart: sxtb then left shift by lshift2.
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart: sxth then left shift by lshift2.
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart: sxtw then left shift by lshift2.
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13267 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit extend-then-shift forms, encoded with addw/subw.
// sxtb (24-bit shift pair) then left shift by lshift2.
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// sxth (16-bit shift pair) then left shift by lshift2.
instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart: sxtb then left shift by lshift2.
instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart: sxth then left shift by lshift2.
instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13327 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// long +/- (((long)int) << lshift): the ConvI2L becomes sxtw and the
// left shift is folded into the extended-register operand.
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart of the pattern above.
instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13357 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Mask-then-shift forms: ((src2 & mask) << lshift) becomes a
// zero-extend plus shift in the extended-register operand.
// (src2 & 0xFFL) << lshift == uxtb #lshift.
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 & 0xFFFFL) << lshift == uxth #lshift.
instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 & 0xFFFFFFFFL) << lshift == uxtw #lshift.
instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart: uxtb #lshift.
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart: uxth #lshift.
instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart: uxtw #lshift.
instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13447 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit mask-then-shift forms, encoded with addw/subw.
// (src2 & 0xFF) << lshift == uxtb #lshift.
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (src2 & 0xFFFF) << lshift == uxth #lshift.
instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart: uxtb #lshift.
instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Subtract counterpart: uxth #lshift.
instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13507 
13508 
13509 
13510 // END This section of the file is automatically generated. Do not edit --------------
13511 
13512 
13513 // ============================================================================
13514 // Floating Point Arithmetic Instructions
13515 
// Single-precision FP add: FADDS dst, src1, src2.
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP add: FADDD dst, src1, src2.
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13545 
// Single-precision FP subtract: FSUBS dst, src1, src2 (dst = src1 - src2).
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP subtract: FSUBD dst, src1, src2 (dst = src1 - src2).
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13575 
// Single-precision FP multiply: FMULS dst, src1, src2.
// Note the slightly higher cost (6x) than add/sub (5x).
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP multiply: FMULD dst, src1, src2.
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13605 
// src1 * src2 + src3
// Fused multiply-add (only when UseFMA): FMADDS computes
// src3 + src1 * src2 in one rounding.  The ideal FmaF node carries the
// addend (src3) as its first input.
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3
// Double-precision fused multiply-add: FMADDD (see note above).
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13639 
// -src1 * src2 + src3 — the negation may sit on either multiply input
// (two match rules), both map onto a single FMSUBS.
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13657 
// -src1 * src2 + src3 — the negation may sit on either multiply input
// (two match rules), both map onto a single FMSUBD.
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13675 
// -src1 * src2 - src3 — negated addend plus a negation on either multiply
// input; both forms map onto FNMADDS.
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13693 
// -src1 * src2 - src3 — negated addend plus a negation on either multiply
// input; both forms map onto FNMADDD.
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13711 
// src1 * src2 - src3 — FmaF with a negated addend maps onto FNMSUBS.
// The previously declared "immF0 zero" operand was unused (it appeared in
// neither the match rule, the format, nor the encoding) and has been removed.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13728 
// src1 * src2 - src3 — FmaD with a negated addend maps onto FNMSUB (64-bit).
// The previously declared "immD0 zero" operand was unused (it appeared in
// neither the match rule, the format, nor the encoding) and has been removed.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    // n.b. the assembler names the 64-bit FNMSUB "fnmsub" (not fnmsubd)
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13746 
13747 
// Math.max(FF)F — IEEE-aware single-precision max via FMAXS.
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxF src1 src2));

  format %{ "fmaxs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
13761 
// Math.min(FF)F — IEEE-aware single-precision min via FMINS.
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinF src1 src2));

  format %{ "fmins   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
13775 
// Math.max(DD)D — IEEE-aware double-precision max via FMAXD.
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MaxD src1 src2));

  format %{ "fmaxd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13789 
// Math.min(DD)D — IEEE-aware double-precision min via FMIND.
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MinD src1 src2));

  format %{ "fmind   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13803 
13804 
// Single-precision divide: dst = src1 / src2 (FDIVS, multi-cycle).
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}
13819 
// Double-precision divide: dst = src1 / src2 (FDIVD, costed higher than divF).
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
13834 
// Single-precision negate: dst = -src (FNEGS).
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Format names the actual insn emitted (fnegs, was "fneg"), matching
  // the style of negD_reg_reg's "fnegd".
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
13848 
// Double-precision negate: dst = -src (FNEGD).
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13862 
// Integer abs, branch-free: compare src with zero, then conditionally
// negate when src < 0.
instruct absI_reg(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (AbsI src));

  // cmpw writes the condition flags, hence KILL cr.
  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $src, zr\n\t"
            "cnegw $dst, $src, Assembler::LT\t# int abs"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cnegw(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
13879 
// Long abs, branch-free: compare src with zero, then conditionally
// negate when src < 0.
instruct absL_reg(iRegLNoSp dst, iRegL src, rFlagsReg cr)
%{
  match(Set dst (AbsL src));

  // cmp writes the condition flags, hence KILL cr.
  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmp  $src, zr\n\t"
            "cneg $dst, $src, Assembler::LT\t# long abs"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cneg(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
13896 
// Single-precision abs: dst = |src| (FABSS).
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
13909 
// Double-precision abs: dst = |src| (FABSD).
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13922 
// Double-precision square root (FSQRTD).
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Use the double-precision divide/sqrt pipeline class; this was fp_div_s,
  // i.e. swapped with sqrtF_reg (scheduling hint only, not correctness).
  ins_pipe(fp_div_d);
%}
13935 
// Single-precision square root (FSQRTS).
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (SqrtF src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Use the single-precision divide/sqrt pipeline class; this was fp_div_d,
  // i.e. swapped with sqrtD_reg (scheduling hint only, not correctness).
  ins_pipe(fp_div_s);
%}
13948 
// Math.rint, floor, ceil — double rounding to integral value; the compile-time
// rmode constant selects FRINTN (ties-to-even), FRINTM (floor) or FRINTP (ceil).
instruct roundD_reg(vRegD dst, vRegD src, immI rmode) %{
  match(Set dst (RoundDoubleMode src rmode));
  format %{ "frint  $dst, $src, $rmode" %}
  ins_encode %{
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        __ frintnd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_floor:
        __ frintmd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_ceil:
        __ frintpd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      default:
        // An unexpected mode constant is a matcher bug; fail loudly instead
        // of silently emitting nothing.
        ShouldNotReachHere();
        break;
    }
  %}
  ins_pipe(fp_uop_d);
%}
13971 
13972 // ============================================================================
13973 // Logical Instructions
13974 
13975 // Integer Logical Instructions
13976 
13977 // And Instructions
13978 
13979 
// Bitwise AND of two ints: dst = src1 & src2 (ANDW).
// NOTE(review): the cr operand is declared but carries no effect(); it looks
// unused here — confirm it can be dropped.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13994 
// Bitwise AND of an int with a logical-immediate: dst = src1 & imm.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Format matches the emitted insn: plain andw, not the flag-setting andsw.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14009 
14010 // Or Instructions
14011 
// Bitwise OR of two ints: dst = src1 | src2 (ORRW).
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14026 
// Bitwise OR of an int with a logical-immediate: dst = src1 | imm.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14041 
14042 // Xor Instructions
14043 
// Bitwise XOR of two ints: dst = src1 ^ src2 (EORW).
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14058 
// Bitwise XOR of an int with a logical-immediate: dst = src1 ^ imm.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14073 
14074 // Long Logical Instructions
14075 // TODO
14076 
// Bitwise AND of two longs: dst = src1 & src2 (64-bit AND, emitted via
// the assembler's andr to avoid the C++ "and" keyword).
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Format annotation corrected from "# int" to "# long".
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14091 
// Bitwise AND of a long with a logical-immediate: dst = src1 & imm.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Format annotation corrected from "# int" to "# long".
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14106 
14107 // Or Instructions
14108 
// Bitwise OR of two longs: dst = src1 | src2 (64-bit ORR).
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  // Format annotation corrected from "# int" to "# long".
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14123 
// Bitwise OR of a long with a logical-immediate: dst = src1 | imm.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  // Format annotation corrected from "# int" to "# long".
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14138 
14139 // Xor Instructions
14140 
// Bitwise XOR of two longs: dst = src1 ^ src2 (64-bit EOR).
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  // Format annotation corrected from "# int" to "# long".
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14155 
// Bitwise XOR of a long with a logical-immediate: dst = src1 ^ imm.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  // Format annotation corrected from "# int" to "# long".
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14170 
// int -> long sign extension; sbfm #0,#31 is the SXTW alias.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
14182 
// Unsigned int -> long: (ConvI2L src) & 0xFFFFFFFF collapses to a single
// zero-extension (ubfm #0,#31 is the UXTW form). This pattern occurs in
// bigmath arithmetic.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14196 
// long -> int narrowing: a 32-bit register move keeps the low word.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
14209 
// int -> boolean: dst = (src != 0) ? 1 : 0 via compare + cset.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  // cmpw writes the condition flags.
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
14227 
// pointer -> boolean: dst = (src != NULL) ? 1 : 0 via 64-bit compare + cset.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  // cmp writes the condition flags.
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
14245 
// double -> float precision conversion (FCVT, double source form).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}
14258 
// float -> double precision conversion (FCVT, single source form).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}
14271 
// float -> int: FCVTZS (signed convert, rounding toward zero), 32-bit dest.
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}
14284 
// float -> long: FCVTZS (signed convert, rounding toward zero), 64-bit dest.
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}
14297 
// int -> float: SCVTF from a 32-bit general register.
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}
14310 
// long -> float: SCVTF from a 64-bit general register.
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}
14323 
// double -> int: FCVTZS (signed convert, rounding toward zero), 32-bit dest.
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}
14336 
// double -> long: FCVTZS (signed convert, rounding toward zero), 64-bit dest.
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}
14349 
// int -> double: SCVTF from a 32-bit general register.
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}
14362 
// long -> double: SCVTF from a 64-bit general register.
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
14375 
14376 // stack <-> reg and reg <-> reg shuffles with no conversion
14377 
// Raw-bit move: load a float stack slot into an int register (32-bit load,
// no conversion).
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}
14395 
// Raw-bit move: load an int stack slot into an FP register (32-bit load,
// no conversion).
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14413 
// Raw-bit move: load a double stack slot into a long register (64-bit load,
// no conversion).
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}
14431 
// Raw-bit move: load a long stack slot into an FP register (64-bit load,
// no conversion).
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14449 
// Raw-bit move: store an FP register into an int stack slot (32-bit store,
// no conversion).
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14467 
// Raw-bit move: store an int register into a float stack slot (32-bit store,
// no conversion).
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
14485 
// Raw-bit move: store an FP register into a long stack slot (64-bit store,
// no conversion).
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Operand order corrected to match the encoding and the sibling
  // reg->stack instructs: the store's value is $src, the slot is $dst
  // (was "strd $dst, $src").
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14503 
// Raw-bit move: store a long register into a double stack slot (64-bit store,
// no conversion).
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
14521 
// Raw-bit move between register files: FP -> int register via FMOV,
// no memory round-trip.
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}
14539 
// Raw-bit move between register files: int -> FP register via FMOV,
// no memory round-trip.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}
14557 
// Raw-bit move between register files: FP -> long register via FMOV (64-bit),
// no memory round-trip.
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}
14575 
// Raw-bit move between register files: long -> FP register via FMOV (64-bit),
// no memory round-trip.
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
14593 
14594 // ============================================================================
14595 // clearing of an array
14596 
// Zero an array with a runtime length. Count and base live in the fixed
// registers R11/R10 and are clobbered by zero_words (USE_KILL); flags too.
// NOTE(review): cnt is presumably a count of words, per zero_words' contract
// elsewhere — confirm against MacroAssembler::zero_words.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base, KILL cr);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, $cnt$$Register);
  %}

  ins_pipe(pipe_class_memory);
%}
14611 
// Zero an array with a compile-time constant length; only selected when the
// constant word count is below BlockZeroingLowLimit (in words), so the
// immediate form of zero_words is profitable.
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  predicate((uint64_t)n->in(2)->get_long()
            < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
  %}

  ins_pipe(pipe_class_memory);
%}
14628 
14629 // ============================================================================
14630 // Overflow Math Instructions
14631 
// Overflow check for int add: CMNW (adds discarding the result) sets the
// flags; the consumer tests V.
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
14644 
// Overflow check for int add with an add/sub immediate operand.
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
14657 
// Overflow check for long add: CMN (64-bit adds discarding the result).
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
14670 
// Overflow check for long add with an add/sub immediate operand.
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
14683 
// Overflow check for int subtract: CMPW sets the flags; consumer tests V.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
14696 
// Overflow check for int subtract with an add/sub immediate operand.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
14709 
// Overflow check for long subtract: 64-bit CMP sets the flags.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
14722 
// Overflow check for long subtract with an immediate; subs with zr
// destination is the CMP-immediate idiom shown in the format.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ subs(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
14735 
// Overflow check for int negate, matched as 0 - op1.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
14748 
// Overflow check for long negate, matched as 0 - op1.
// NOTE(review): the zero operand is declared immI0 although the subtraction
// is long — confirm the matcher canonicalizes the long constant 0 here
// (an immL0 would be the natural choice).
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
14761 
// Overflow check for int multiply. The 64-bit smull result is compared
// against its own sign-extended low word (NE iff the product does not fit
// in 32 bits); the following movw/cselw/cmpw sequence translates that NE
// into the V flag so callers can use the usual VS/VC tests.
// Clobbers rscratch1.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
14782 
14783 instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
14784 %{
14785   match(If cmp (OverflowMulI op1 op2));
14786   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
14787             || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
14788   effect(USE labl, KILL cr);
14789 
14790   format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
14791             "cmp   rscratch1, rscratch1, sxtw\n\t"
14792             "b$cmp   $labl" %}
14793   ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
14794   ins_encode %{
14795     Label* L = $labl$$label;
14796     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
14797     __ smull(rscratch1, $op1$$Register, $op2$$Register);
14798     __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
14799     __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
14800   %}
14801 
14802   ins_pipe(pipe_serial);
14803 %}
14804 
14805 instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
14806 %{
14807   match(Set cr (OverflowMulL op1 op2));
14808 
14809   format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
14810             "smulh rscratch2, $op1, $op2\n\t"
14811             "cmp   rscratch2, rscratch1, ASR #63\n\t"
14812             "movw  rscratch1, #0x80000000\n\t"
14813             "cselw rscratch1, rscratch1, zr, NE\n\t"
14814             "cmpw  rscratch1, #1" %}
14815   ins_cost(6 * INSN_COST);
14816   ins_encode %{
14817     __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
14818     __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
14819     __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
14820     __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
14821     __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
14822     __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
14823   %}
14824 
14825   ins_pipe(pipe_slow);
14826 %}
14827 
14828 instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
14829 %{
14830   match(If cmp (OverflowMulL op1 op2));
14831   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
14832             || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
14833   effect(USE labl, KILL cr);
14834 
14835   format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
14836             "smulh rscratch2, $op1, $op2\n\t"
14837             "cmp   rscratch2, rscratch1, ASR #63\n\t"
14838             "b$cmp $labl" %}
14839   ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
14840   ins_encode %{
14841     Label* L = $labl$$label;
14842     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
14843     __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
14844     __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
14845     __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
14846     __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
14847   %}
14848 
14849   ins_pipe(pipe_serial);
14850 %}
14851 
14852 // ============================================================================
14853 // Compare Instructions
14854 
// Signed int compare, register-register.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed int compare against the constant zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an add/sub-encodable immediate.
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an arbitrary immediate.  Costed at two
// instructions — presumably the constant may need materializing first;
// see aarch64_enc_cmpw_imm (defined elsewhere in this file).
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14910 
14911 // Unsigned compare Instructions; really, same as signed compare
14912 // except it should only be used to feed an If or a CMovI which takes a
14913 // cmpOpU.
14914 
// Unsigned int compare, register-register.  Same encoding as the signed
// form; only the flags register class (rFlagsRegU) differs, steering
// consumers toward unsigned condition codes.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned int compare against the constant zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an arbitrary immediate (costed higher,
// matching compI_reg_immI).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14970 
// Signed long compare, register-register.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed long compare against the constant zero.
// NOTE(review): the format says "tst" but the encoding class is the
// add/sub-immediate compare with a zero operand — confirm the emitted
// instruction is cmp $op1, #0 and consider aligning the format text.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an arbitrary immediate (higher cost).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15026 
// Unsigned long compare, register-register; identical encoding to the
// signed form but defines the unsigned flags register class.
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned long compare against the constant zero.
// NOTE(review): as with compL_reg_immL0, the "tst" format text does not
// obviously match the add/sub-immediate compare encoding — confirm.
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an add/sub-encodable immediate.
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an arbitrary immediate (higher cost).
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15082 
// Pointer compare, register-register.  Pointer comparisons are
// unsigned, hence rFlagsRegU.
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed-oop (narrow pointer) compare, register-register.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null test (compare against the null pointer constant).
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed-oop null test.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
15138 
15139 // FP comparisons
15140 //
15141 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
15142 // using normal cmpOp. See declaration of rFlagsReg for details.
15143 
// Float compare, register-register (per the section note above, sets
// the normal flags register for use with ordinary cmpOp conditions).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Float compare against the constant 0.0 (fcmps immediate-zero form).
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
// FROM HERE

// Double compare, register-register.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Double compare against the constant 0.0.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
15200 
// Three-way float compare (CmpF3): $dst := -1, 0 or +1 from the fcmps
// result; less-than and unordered both yield -1 (csnegw keeps the -1
// installed by csinvw when LT holds, which includes the unordered case
// since fcmps sets flags such that LT covers NaN operands — as the
// original inline comments state).
// Fixes: balanced the missing ')' in the format text; removed the
// unused 'done' label (it was never branched to, so the bind was dead).
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
15228 
// Three-way double compare (CmpD3): $dst := -1, 0 or +1; unordered
// compares as less-than, mirroring compF3_reg_reg.
// Fixes: balanced the missing ')' in the format text; removed the
// unused 'done' label (never branched to — the bind was dead code).
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
15255 
// Three-way float compare against 0.0 (CmpF3 with a zero constant):
// same -1/0/+1 result convention as compF3_reg_reg, using the fcmps
// immediate-zero form.
// Fixes: balanced the missing ')' in the format text; removed the
// unused 'done' label (never branched to — the bind was dead code).
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
15282 
// Three-way double compare against 0.0 (CmpD3 with a zero constant):
// same -1/0/+1 result convention as compD3_reg_reg.
// Fixes: balanced the missing ')' in the format text; removed the
// unused 'done' label (never branched to — the bind was dead code).
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
15308 
// CmpLTMask p q: $dst := (p < q) ? -1 : 0.  csetw materializes the LT
// result as 0/1, then subtracting it from zr turns 1 into the all-ones
// mask -1.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask src 0: a single arithmetic shift right by 31 smears the
// sign bit, yielding -1 for negative src and 0 otherwise.  No flags
// needed, but cr is still killed for consistency with the match rule.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
15345 
15346 // ============================================================================
15347 // Max and Min
15348 
// Conditional select helper (no match rule — only used by the expand
// rules below): $dst := (flags say lt) ? $src1 : $src2.
instruct cmovI_reg_reg_lt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect( DEF dst, USE src1, USE src2, USE cr );

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 lt\t"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(icond_reg_reg);
%}

// MinI: expands to a compare followed by a LT conditional select.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
%{
  match(Set dst (MinI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_lt(dst, src1, src2, cr);
  %}

%}
// FROM HERE

// Conditional select helper for MaxI: $dst := (gt) ? $src1 : $src2.
instruct cmovI_reg_reg_gt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect( DEF dst, USE src1, USE src2, USE cr );

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 gt\t"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(icond_reg_reg);
%}

// MaxI: expands to a compare followed by a GT conditional select.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
%{
  match(Set dst (MaxI src1 src2));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_gt(dst, src1, src2, cr);
  %}
%}
15407 
15408 // ============================================================================
15409 // Branch Instructions
15410 
15411 // Direct Branch.
// Direct Branch.
// Unconditional direct branch (Goto).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}

// Conditional Near Branch
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}

// Conditional Near Branch Unsigned
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
15467 
15468 // Make use of CBZ and CBNZ.  These instructions, as well as being
15469 // shorter than (cmp; branch), have the additional benefit of not
15470 // killing the flags.
15471 
// Int compare-with-zero fused into a single CBZW/CBNZW (EQ/NE only, per
// the cmpOpEqNe operand); flags are left untouched (see section note).
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Long compare-with-zero fused into CBZ/CBNZ.
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Pointer null-check fused into CBZ/CBNZ (64-bit forms).
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Compressed-oop null-check fused into CBZW/CBNZW (32-bit forms).
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Null-check of a decoded narrow oop: the DecodeN can be skipped since
// the narrow form is zero exactly when the decoded pointer is null, so
// CBZW/CBNZW on the narrow register suffices.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned int compare with zero fused into CBZW/CBNZW.  EQ and LS
// (unsigned <=) both hold exactly when the register is zero, so both
// map to cbzw; NE and HI map to cbnzw.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned long compare with zero fused into CBZ/CBNZ; same EQ/LS
// mapping as the int form.
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15590 
15591 // Test bit and Branch
15592 
15593 // Patterns for short (< 32KiB) variants
// Long sign test (< 0 / >= 0) as a test of bit 63.  LT becomes NE
// (branch if the sign bit is set) and GE becomes EQ; tbr presumably
// selects tbnz/tbz from that condition — defined elsewhere in HotSpot.
// Short variant: target must be within tbz range (< 32KiB).
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Int sign test as a test of bit 31; same LT->NE / GE->EQ mapping.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Long single-bit test-and-branch: (op1 & 2^k) ==/!= 0 collapses to
// one tbz/tbnz when the AND mask is a power of two (see predicate).
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Int single-bit test-and-branch; 32-bit analogue of the above.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
15659 
15660 // And far variants
// Far variant of cmpL_branch_sign: same bit-63 test but tbr is told to
// emit a far-capable sequence (no ins_short_branch, no range limit).
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of cmpI_branch_sign (bit-31 test).
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of cmpL_branch_bit (power-of-two mask, see predicate).
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of cmpI_branch_bit.
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15722 
15723 // Test bits
15724 
// Long bit test: CmpL (AndL op1 op2) 0 collapses to a single tst when
// op2 is encodable as a 64-bit logical immediate (see predicate).
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
15737 
// Int bit test: CmpI (AndI op1 op2) 0 collapses to a single tstw when
// op2 is encodable as a 32-bit logical immediate (see predicate).
// Fix: format text said "tst" while the encoding emits tstw; now uses
// the w-form mnemonic, consistent with cmpI_and_reg below.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
15750 
// Long bit test against a register mask (no immediate-encodability
// requirement): single flag-setting tst.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Int bit test against a register mask: single flag-setting tstw.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
15772 
15773 
15774 // Conditional Far Branch
15775 // Conditional Far Branch Unsigned
15776 // TODO: fixme
15777 
15778 // counted loop end branch near
// Conditional branch at the end of a counted loop (signed condition).
// Uses the shared conditional-branch encoding; the short-branch variant
// is intentionally disabled (see commented-out ins_short_branch below).
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
15794 
15795 // counted loop end branch near Unsigned
// Conditional branch at the end of a counted loop (unsigned condition).
// Mirrors branchLoopEnd but uses the unsigned flag register/encoding.
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
15811 
15812 // counted loop end branch far
15813 // counted loop end branch far unsigned
15814 // TODO: fixme
15815 
15816 // ============================================================================
15817 // inlined locking and unlocking
15818 
// Inlined fast-path monitor enter.  Sets the flags register to report
// success/failure of the fast lock; tmp and tmp2 are scratch registers
// clobbered by the encoding.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
15833 
// Inlined fast-path monitor exit.  Counterpart of cmpFastLock; flags
// report success/failure, tmp and tmp2 are clobbered scratch registers.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
15846 
15847 
15848 // ============================================================================
15849 // Safepoint Instructions
15850 
15851 // TODO
15852 // provide a near and far version of this code
15853 
// Safepoint poll: load from the polling page so the VM can trap the
// thread by protecting that page.  Kills flags per the effect clause.
instruct safePoint(rFlagsReg cr, iRegP poll)
%{
  match(SafePoint poll);
  effect(KILL cr);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    // Emits the poll load with poll_type relocation so the safepoint
    // code can find and patch/inspect it.
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
15867 
15868 
15869 // ============================================================================
15870 // Procedure Call/Return Instructions
15871 
15872 // Call Java Static Instruction
15873 
// Direct call to a statically-bound Java method, followed by the
// standard call epilogue encoding.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15889 
15890 // TO HERE
15891 
15892 // Call Java Dynamic Instruction
// Dynamically-dispatched Java call (inline-cache call), followed by the
// standard call epilogue encoding.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15908 
15909 // Call Runtime Instruction
15910 
// Call from compiled Java code into the VM runtime.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15925 
15926 // Call Runtime Instruction
15927 
// Leaf runtime call (no safepoint/oop-map bookkeeping in the callee);
// uses the same java-to-runtime encoding as CallRuntimeDirect.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15942 
15943 // Call Runtime Instruction
15944 
// Leaf runtime call that does not use floating-point registers; shares
// the java-to-runtime encoding with the other runtime-call instructs.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15959 
15960 // Tail Call; Jump from runtime stub to Java code.
15961 // Also known as an 'interprocedural jump'.
15962 // Target of jump will eventually return to caller.
15963 // TailJump below removes the return address.
// Indirect tail call: jump through jump_target; method_ptr carries the
// callee method but is not used by the encoding itself.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_ptr)
%{
  match(TailCall jump_target method_ptr);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_ptr holds method" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
15976 
// Indirect tail jump used for exception forwarding: ex_oop (pinned to
// r0) carries the exception oop to the target.
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
15989 
15990 // Create exception oop: created by stack-crawling runtime code.
15991 // Created exception is now available to this handler, and is setup
15992 // just prior to jumping to this handler. No code emitted.
15993 // TODO check
15994 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Bind the incoming exception oop (already placed in r0 by the runtime
// stack-crawling code) to a register operand.  Emits no instructions.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
16007 
16008 // Rethrow exception: The exception oop will come in the first
16009 // argument position. Then JUMP (not call) to the rethrow stub code.
// Rethrow the pending exception: jump (not call) to the rethrow stub;
// the exception oop arrives in the first argument register.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
16020 
16021 
16022 // Return Instruction
16023 // epilog node loads ret address into lr as part of frame pop
// Method return.  The epilog node has already restored lr as part of
// the frame pop; this just emits the ret.
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
16034 
16035 // Die now.
// Halt node: abort the VM with the recorded reason if control ever
// reaches this point.  Emits nothing when the block is unreachable.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // Only emit the stop when the node is reachable; unreachable Halt
    // nodes are elided entirely.
    if (is_reachable()) {
      __ stop(_halt_reason);
    }
  %}

  ins_pipe(pipe_class_default);
%}
16050 
16051 // ============================================================================
16052 // Partial Subtype Check
16053 //
16054 // superklass array for an instance of the superklass.  Set a hidden
16055 // internal cache on a hit (cache is checked with exposed code in
16056 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
16057 // encoding ALSO sets flags.
16058 
// Partial subtype check producing a value: result is zero on a hit,
// non-zero on a miss; the encoding also sets flags and may update the
// secondary-supers cache.  Register bindings (r4/r0/r2/r5) are fixed
// by the stub's calling convention.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
16073 
// Flags-only variant: matches a compare of the partial-subtype-check
// result against zero, so only the flags are needed and the result
// register does not have to be zeroed on a hit (opcode 0x0).
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // same cost as partialSubtypeCheck above
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
16088 
// String compare, both strings UTF-16 (UU encoding).  Produces a
// negative/zero/positive result in r0.  No vector temps are needed for
// this encoding, so fnoreg is passed for all three.
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  // Both tmp1 and tmp2 are killed (see effect clause above).
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16106 
// String compare, both strings Latin-1 (LL encoding).  Produces a
// negative/zero/positive result in r0; no vector temps needed.
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  // Both tmp1 and tmp2 are killed (see effect clause above).
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16123 
// String compare, str1 UTF-16 vs str2 Latin-1 (UL encoding).  The
// mixed-encoding path additionally needs three vector temps.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16143 
// String compare, str1 Latin-1 vs str2 UTF-16 (LU encoding).  Mirrors
// string_compareUL, which also uses three vector temps.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
16163 
// String.indexOf with a runtime-length needle, both strings UTF-16.
// The -1 passed as the constant-count argument tells string_indexof
// that cnt2 is only known at runtime.
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16184 
// String.indexOf with a runtime-length needle, both strings Latin-1.
// -1 marks cnt2 as a runtime value (see string_indexofUU).
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16205 
// String.indexOf with a runtime-length needle, haystack UTF-16 and
// needle Latin-1 (UL).  -1 marks cnt2 as a runtime value.
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16226 
// String.indexOf with a small compile-time-constant needle length
// (immI_le_4), both strings UTF-16.  The constant count is passed to
// string_indexof directly, so zr stands in for the cnt2 register and
// only four temps are needed.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16247 
// Constant-needle-length indexOf, both strings Latin-1.  See
// string_indexof_conUU for the zr/constant-count convention.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16268 
// Constant-needle-length indexOf for the UL (mixed-encoding) case.
// Note the needle length is restricted to exactly 1 here (immI_1),
// unlike the UU/LL variants which allow up to 4.
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16289 
// Find the index of a single char within a UTF-16 string.
instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16307 
// String equality test, Latin-1 encoding.  The trailing argument to
// string_equals is 1 here vs 2 in string_equalsU — presumably the
// element size in bytes; confirm against MacroAssembler::string_equals.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
16323 
// String equality test, UTF-16 encoding.  Same shape as string_equalsL
// but passes 2 as the trailing string_equals argument.
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
16339 
// byte[] array equality.  The trailing 1 is the element size passed to
// arrays_equals (2 in the char[] variant below).
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  // $ary2 (with the operand sigil) so the second operand prints in the
  // debug listing.
  format %{ "Array Equals $ary1,$ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
16356 
// char[] array equality (element size 2); mirrors array_equalsB.
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  // $ary2 (with the operand sigil) so the second operand prints in the
  // debug listing.
  format %{ "Array Equals $ary1,$ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
16373 
// Test whether a byte[] contains any negative bytes (i.e. any byte with
// the high bit set); result is left in r0.
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (HasNegatives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "has negatives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
16384 
16385 // fast char[] to byte[] compression
16386 instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
16387                          vRegD_V0 tmp1, vRegD_V1 tmp2,
16388                          vRegD_V2 tmp3, vRegD_V3 tmp4,
16389                          iRegI_R0 result, rFlagsReg cr)
16390 %{
16391   match(Set result (StrCompressedCopy src (Binary dst len)));
16392   effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);
16393 
16394   format %{ "String Compress $src,$dst -> $result    // KILL R1, R2, R3, R4" %}
16395   ins_encode %{
16396     __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
16397                            $tmp1$$FloatRegister, $tmp2$$FloatRegister,
16398                            $tmp3$$FloatRegister, $tmp4$$FloatRegister,
16399                            $result$$Register);
16400   %}
16401   ins_pipe( pipe_slow );
16402 %}
16403 
16404 // fast byte[] to char[] inflation
// fast byte[] to char[] inflation
// NOTE(review): the format mentions only $tmp1, $tmp2 although the
// effect clause also declares tmp3 and tmp4 as temps — consider
// listing all four.
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
                        vRegD_V0 tmp1, vRegD_V1 tmp2, vRegD_V2 tmp3, iRegP_R3 tmp4, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                          $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16418 
16419 // encode char[] to byte[] in ISO_8859_1
// encode char[] to byte[] in ISO_8859_1; result reports how many chars
// were encoded (per the EncodeISOArray node contract — the count comes
// back in r0).
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
16437 
16438 // ============================================================================
16439 // This name is KNOWN by the ADLC and cannot be changed.
16440 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
16441 // for this guy.
// ThreadLocal: bind the current-thread pointer to the dedicated thread
// register.  No code is emitted — the register already holds the value.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
16456 
16457 // ====================VECTOR INSTRUCTIONS=====================================
16458 
16459 // Load vector (32 bits)
// Load a 32-bit vector into the low lanes of a D register.
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
16469 
16470 // Load vector (64 bits)
// Load a 64-bit vector into a D register.
instruct loadV8(vecD dst, vmem8 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
16480 
16481 // Load Vector (128 bits)
// Load a 128-bit vector into a Q register.  Only used when SVE is off;
// an SVE rule elsewhere presumably handles this size when UseSVE > 0.
instruct loadV16(vecX dst, vmem16 mem)
%{
  predicate(UseSVE == 0 && n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}
16491 
16492 // Store Vector (32 bits)
// Store the low 32 bits of a vector register.
instruct storeV4(vecD src, vmem4 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
16502 
16503 // Store Vector (64 bits)
// Store a 64-bit vector from a D register.
instruct storeV8(vecD src, vmem8 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
16513 
16514 // Store Vector (128 bits)
// Store a 128-bit vector from a Q register.
// NOTE(review): unlike loadV16, this predicate does not test
// UseSVE == 0 — verify whether that asymmetry is intentional.
instruct storeV16(vecX src, vmem16 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
16524 
// Replicate a byte from a general register across an 8B (or 4B) vector.
// The 4-lane case reuses the 8B dup; the extra lanes are simply unused.
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
16537 
// Replicate a byte across a full 16B vector (NEON only; SVE handled
// elsewhere per the UseSVE == 0 predicate).
instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(UseSVE == 0 && n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16549 
// Replicate an immediate byte across an 8B (or 4B) vector via MOVI.
// Only the low 8 bits of the constant are used.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
16562 
// Replicate an immediate byte across a 16B vector via MOVI (NEON only).
instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(UseSVE == 0 && n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
16574 
// Replicate a 16-bit value across a 4H (or 2H) vector.
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
16587 
// Replicate a 16-bit value across a full 8H vector (NEON only).
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(UseSVE == 0 && n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16599 
16600 instruct replicate4S_imm(vecD dst, immI con)
16601 %{
16602   predicate(n->as_Vector()->length() == 2 ||
16603             n->as_Vector()->length() == 4);
16604   match(Set dst (ReplicateS con));
16605   ins_cost(INSN_COST);
16606   format %{ "movi  $dst, $con\t# vector(4H)" %}
16607   ins_encode %{
16608     __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
16609   %}
16610   ins_pipe(vmovi_reg_imm64);
16611 %}
16612 
16613 instruct replicate8S_imm(vecX dst, immI con)
16614 %{
16615   predicate(UseSVE == 0 && n->as_Vector()->length() == 8);
16616   match(Set dst (ReplicateS con));
16617   ins_cost(INSN_COST);
16618   format %{ "movi  $dst, $con\t# vector(8H)" %}
16619   ins_encode %{
16620     __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
16621   %}
16622   ins_pipe(vmovi_reg_imm128);
16623 %}
16624 
16625 instruct replicate2I(vecD dst, iRegIorL2I src)
16626 %{
16627   predicate(n->as_Vector()->length() == 2);
16628   match(Set dst (ReplicateI src));
16629   ins_cost(INSN_COST);
16630   format %{ "dup  $dst, $src\t# vector (2I)" %}
16631   ins_encode %{
16632     __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
16633   %}
16634   ins_pipe(vdup_reg_reg64);
16635 %}
16636 
16637 instruct replicate4I(vecX dst, iRegIorL2I src)
16638 %{
16639   predicate(UseSVE == 0 && n->as_Vector()->length() == 4);
16640   match(Set dst (ReplicateI src));
16641   ins_cost(INSN_COST);
16642   format %{ "dup  $dst, $src\t# vector (4I)" %}
16643   ins_encode %{
16644     __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
16645   %}
16646   ins_pipe(vdup_reg_reg128);
16647 %}
16648 
16649 instruct replicate2I_imm(vecD dst, immI con)
16650 %{
16651   predicate(n->as_Vector()->length() == 2);
16652   match(Set dst (ReplicateI con));
16653   ins_cost(INSN_COST);
16654   format %{ "movi  $dst, $con\t# vector(2I)" %}
16655   ins_encode %{
16656     __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
16657   %}
16658   ins_pipe(vmovi_reg_imm64);
16659 %}
16660 
16661 instruct replicate4I_imm(vecX dst, immI con)
16662 %{
16663   predicate(UseSVE == 0 && n->as_Vector()->length() == 4);
16664   match(Set dst (ReplicateI con));
16665   ins_cost(INSN_COST);
16666   format %{ "movi  $dst, $con\t# vector(4I)" %}
16667   ins_encode %{
16668     __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
16669   %}
16670   ins_pipe(vmovi_reg_imm128);
16671 %}
16672 
16673 instruct replicate2L(vecX dst, iRegL src)
16674 %{
16675   predicate(UseSVE == 0 && n->as_Vector()->length() == 2);
16676   match(Set dst (ReplicateL src));
16677   ins_cost(INSN_COST);
16678   format %{ "dup  $dst, $src\t# vector (2L)" %}
16679   ins_encode %{
16680     __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
16681   %}
16682   ins_pipe(vdup_reg_reg128);
16683 %}
16684 
// Materialize an all-zero 128-bit vector.  NOTE(review): this matches
// (ReplicateI immI0) with a length-2 predicate — apparently C2 presents the
// zero constant for a 2-long vector this way; confirm against the ideal
// graph before changing the match rule.
// The encoding XORs the destination with itself, a zeroing idiom whose
// result is independent of the register's previous contents (dst need not
// be live on entry).
// Fix: the previous format string claimed "movi $dst, $zero # vector(4I)",
// which did not match the emitted eor; the disassembly annotation now
// reflects the actual instruction.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(UseSVE == 0 && n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "eor  $dst, $dst, $dst\t# vector (2L zero)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
16698 
// FP replicate patterns: broadcast lane 0 of an FP/SIMD source register
// into every lane of the destination vector (dup with a vector-register
// source, as opposed to the GP-register dup used by the integer forms).

// Broadcast a float into both 32-bit lanes of a 64-bit vector.
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}

// Broadcast a float into all four 32-bit lanes.
instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(UseSVE == 0 && n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}

// Broadcast a double into both 64-bit lanes.
instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(UseSVE == 0 && n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
16737 
16738 // ====================REDUCTION ARITHMETIC====================================
16739 
// Add/mul reductions: fold all lanes of a vector into a scalar, combined
// with a scalar input (isrc/fsrc/dsrc).  The integer forms move lanes into
// GP registers via umov; the FP forms accumulate lane-by-lane with scalar
// fadds/fmuls — the lanes are combined strictly in order, one at a time
// (NOTE(review): presumably to preserve Java's non-associative FP
// semantics — no tree-shaped or pairwise FP reduction is used).
// These rules carry no predicate; selection is driven by the vecD/vecX
// operand types in the match rule.

// dst = isrc + vsrc[0] + vsrc[1] (2 ints).
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, iRegINoSp tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $vsrc, S, 0\n\t"
            "umov  $tmp2, $vsrc, S, 1\n\t"
            "addw  $tmp, $isrc, $tmp\n\t"
            "addw  $dst, $tmp, $tmp2\t# add reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($vsrc$$reg), __ S, 1);
    __ addw($tmp$$Register, $isrc$$Register, $tmp$$Register);
    __ addw($dst$$Register, $tmp$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// dst = isrc + sum(vsrc[0..3]): addv sums the 4 int lanes in one
// instruction, then the scalar is added in GP registers.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, vecX vtmp, iRegINoSp itmp)
%{
  match(Set dst (AddReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP vtmp, TEMP itmp);
  format %{ "addv  $vtmp, T4S, $vsrc\n\t"
            "umov  $itmp, $vtmp, S, 0\n\t"
            "addw  $dst, $itmp, $isrc\t# add reduction4I"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($vtmp$$reg), __ T4S,
            as_FloatRegister($vsrc$$reg));
    __ umov($itmp$$Register, as_FloatRegister($vtmp$$reg), __ S, 0);
    __ addw($dst$$Register, $itmp$$Register, $isrc$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// dst = isrc * vsrc[0] * vsrc[1] (2 ints).  dst is a TEMP because it is
// written before the last use of the inputs.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, iRegINoSp tmp)
%{
  match(Set dst (MulReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $vsrc, S, 0\n\t"
            "mul   $dst, $tmp, $isrc\n\t"
            "umov  $tmp, $vsrc, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t# mul reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $isrc$$Register);
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// 4-int mul reduction: ins copies the high 64 bits of vsrc into the low
// half of vtmp (dst lane 0 <- src lane 1), mulv multiplies lanes pairwise
// ({v0*v2, v1*v3}), and the two products are combined with isrc in GP
// registers.  Integer multiply is associative, so the pairwise step is
// safe here (unlike the FP reductions below).
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, vecX vtmp, iRegINoSp itmp)
%{
  match(Set dst (MulReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP vtmp, TEMP itmp, TEMP dst);
  format %{ "ins   $vtmp, D, $vsrc, 0, 1\n\t"
            "mulv  $vtmp, T2S, $vtmp, $vsrc\n\t"
            "umov  $itmp, $vtmp, S, 0\n\t"
            "mul   $dst, $itmp, $isrc\n\t"
            "umov  $itmp, $vtmp, S, 1\n\t"
            "mul   $dst, $itmp, $dst\t# mul reduction4I"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($vtmp$$reg), __ D,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp$$reg), __ T2S,
            as_FloatRegister($vtmp$$reg), as_FloatRegister($vsrc$$reg));
    __ umov($itmp$$Register, as_FloatRegister($vtmp$$reg), __ S, 0);
    __ mul($dst$$Register, $itmp$$Register, $isrc$$Register);
    __ umov($itmp$$Register, as_FloatRegister($vtmp$$reg), __ S, 1);
    __ mul($dst$$Register, $itmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// dst = ((fsrc + vsrc[0]) + vsrc[1]) — strict left-to-right FP add.
instruct reduce_add2F(vRegF dst, vRegF fsrc, vecD vsrc, vecD tmp)
%{
  match(Set dst (AddReductionVF fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t# add reduction2F"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// dst = (((fsrc + v[0]) + v[1]) + v[2]) + v[3] — each lane is moved to
// tmp's lane 0 with ins and folded in with a scalar fadds.
instruct reduce_add4F(vRegF dst, vRegF fsrc, vecX vsrc, vecX tmp)
%{
  match(Set dst (AddReductionVF fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $vsrc, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $vsrc, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t# add reduction4F"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// dst = ((fsrc * vsrc[0]) * vsrc[1]) — strict left-to-right FP multiply.
instruct reduce_mul2F(vRegF dst, vRegF fsrc, vecD vsrc, vecD tmp)
%{
  match(Set dst (MulReductionVF fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t# mul reduction2F"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// dst = (((fsrc * v[0]) * v[1]) * v[2]) * v[3] — same lane-by-lane scheme
// as reduce_add4F, with fmuls.
instruct reduce_mul4F(vRegF dst, vRegF fsrc, vecX vsrc, vecX tmp)
%{
  match(Set dst (MulReductionVF fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $vsrc, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $vsrc, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t# mul reduction4F"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// dst = ((dsrc + vsrc[0]) + vsrc[1]) — 2-double add reduction.
instruct reduce_add2D(vRegD dst, vRegD dsrc, vecX vsrc, vecX tmp)
%{
  match(Set dst (AddReductionVD dsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $dsrc, $vsrc\n\t"
            "ins   $tmp, D, $vsrc, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t# add reduction2D"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// dst = ((dsrc * vsrc[0]) * vsrc[1]) — 2-double mul reduction.
instruct reduce_mul2D(vRegD dst, vRegD dsrc, vecX vsrc, vecX tmp)
%{
  match(Set dst (MulReductionVD dsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $dsrc, $vsrc\n\t"
            "ins   $tmp, D, $vsrc, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t# mul reduction2D"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16964 
// Min/max reductions.  MaxReductionV/MinReductionV are not typed per
// element, so each rule checks the element basic type of the vector input
// (n->in(2)) in its predicate.  TEMP_DEF dst: dst is written before the
// inputs are fully consumed, so it must not be allocated to an input
// register.

// 2-float max: fold fsrc with lane 0, then with lane 1.
instruct reduce_max2F(vRegF dst, vRegF fsrc, vecD vsrc, vecD tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxReductionV fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmaxs $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fmaxs $dst, $dst, $tmp\t# max reduction2F" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S, as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// 4-float max: fmaxv reduces all 4 lanes in one instruction (max is
// associative, so no ordered chain is needed), then folds in fsrc.
instruct reduce_max4F(vRegF dst, vRegF fsrc, vecX vsrc) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxReductionV fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "fmaxv $dst, T4S, $vsrc\n\t"
            "fmaxs $dst, $dst, $fsrc\t# max reduction4F" %}
  ins_encode %{
    __ fmaxv(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($vsrc$$reg));
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($fsrc$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// 2-double max (fmaxv has no 2D form, so lane 1 is extracted with ins).
instruct reduce_max2D(vRegD dst, vRegD dsrc, vecX vsrc, vecX tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MaxReductionV dsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmaxd $dst, $dsrc, $vsrc\n\t"
            "ins   $tmp, D, $vsrc, 0, 1\n\t"
            "fmaxd $dst, $dst, $tmp\t# max reduction2D" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg), as_FloatRegister($dsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D, as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmaxd(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// 2-float min — mirror of reduce_max2F using fmins.
instruct reduce_min2F(vRegF dst, vRegF fsrc, vecD vsrc, vecD tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinReductionV fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmins $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fmins $dst, $dst, $tmp\t# min reduction2F" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S, as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// 4-float min — mirror of reduce_max4F using fminv/fmins.
instruct reduce_min4F(vRegF dst, vRegF fsrc, vecX vsrc) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinReductionV fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "fminv $dst, T4S, $vsrc\n\t"
            "fmins $dst, $dst, $fsrc\t# min reduction4F" %}
  ins_encode %{
    __ fminv(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($vsrc$$reg));
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($fsrc$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// 2-double min — mirror of reduce_max2D using fmind.
instruct reduce_min2D(vRegD dst, vRegD dsrc, vecX vsrc, vecX tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MinReductionV dsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmind $dst, $dsrc, $vsrc\n\t"
            "ins   $tmp, D, $vsrc, 0, 1\n\t"
            "fmind $dst, $dst, $tmp\t# min reduction2D" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg), as_FloatRegister($dsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D, as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmind(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
17056 
17057 // ====================VECTOR ARITHMETIC=======================================
17058 
17059 // --------------------------------- ADD --------------------------------------
17060 
// Lane-wise vector add, one rule per lane configuration.  Integer forms
// emit addv, FP forms fadd.  The 64-bit byte/short rules also accept the
// shorter vector lengths (4B via 8B, 2S via 4H) — the extra lanes compute
// garbage that consumers of the shorter vector never read.

// 8B (or 4B) integer add, 64-bit vector.
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// 16B integer add, 128-bit vector.
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// 4H (or 2H) short add, 64-bit vector.
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// 8H short add, 128-bit vector.
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// 2S int add, 64-bit vector.
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// 4S int add, 128-bit vector.
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// 2D long add, 128-bit vector.
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// 2S float add, 64-bit vector.
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// 4S float add, 128-bit vector.
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

// 2D double add.  No length predicate: AddVD only exists at length 2 here,
// so the vecX operands alone select this rule.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17201 
17202 // --------------------------------- SUB --------------------------------------
17203 
// Lane-wise vector subtract (dst = src1 - src2), mirroring the ADD rules
// above: subv for integer lanes, fsub for FP lanes.

// 8B (or 4B) byte subtract, 64-bit vector.
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// 16B byte subtract, 128-bit vector.
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// 4H (or 2H) short subtract, 64-bit vector.
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// 8H short subtract, 128-bit vector.
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// 2S int subtract, 64-bit vector.
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// 4S int subtract, 128-bit vector.
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// 2D long subtract, 128-bit vector.
instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// 2S float subtract, 64-bit vector.
instruct vsub2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// 4S float subtract, 128-bit vector.
instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

// 2D double subtract, 128-bit vector.
instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17345 
17346 // --------------------------------- MUL --------------------------------------
17347 
// Lane-wise vector multiply: mulv for integer lanes, fmul for FP lanes.
// NOTE(review): there is no MulVL rule here — presumably because NEON has
// no 2D integer multiply; confirm against how C2 handles long vector
// multiplies on this port before adding one.

// 8B (or 4B) byte multiply, 64-bit vector.
instruct vmul8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (MulVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// 16B byte multiply, 128-bit vector.
instruct vmul16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (MulVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// 4H (or 2H) short multiply, 64-bit vector.
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// 8H short multiply, 128-bit vector.
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// 2S int multiply, 64-bit vector.
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// 4S int multiply, 128-bit vector.
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// 2S float multiply, 64-bit vector.
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// 4S float multiply, 128-bit vector.
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// 2D double multiply, 128-bit vector.
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17475 
17476 // --------------------------------- MLA --------------------------------------
17477 
// Fused AddVS(dst, MulVS(src1, src2)) into a single MLA; dst is both
// accumulator input and result. 2 or 4 short lanes, D register.
instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Multiply-accumulate, 8 short lanes, X register.
instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Multiply-accumulate, 2 int lanes, D register.
instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Multiply-accumulate, 4 int lanes, X register.
instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// dst + src1 * src2
// FmaVF (fused, single rounding) — only matched when UseFMA is set.
instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst + src1 * src2
instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst + src1 * src2
instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17576 
17577 // --------------------------------- MLS --------------------------------------
17578 
// Fused SubVS(dst, MulVS(src1, src2)) into a single MLS; dst is both
// accumulator input and result. 2 or 4 short lanes, D register.
instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Multiply-subtract, 8 short lanes, X register.
instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Multiply-subtract, 2 int lanes, D register.
instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Multiply-subtract, 4 int lanes, X register.
instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// dst - src1 * src2
// Two match rules: a negation on either multiplicand folds into FMLS.
instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst - src1 * src2
instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst - src1 * src2
instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary (NegVD src1) src2)));
  match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17680 
17681 // --------------- Vector Multiply-Add Shorts into Integer --------------------
17682 
// MulAddVS2VI: widening multiply of packed shorts followed by pairwise add,
// producing packed ints. Implemented as two widening multiplies (T4H form,
// and the T8H form — presumably the smull2 high-half encoding; confirm
// against the assembler) whose results are combined with a pairwise add.
// dst is TEMP_DEF and tmp is TEMP because both are written before all
// inputs are consumed.
instruct vmuladdS2I(vecX dst, vecX src1, vecX src2, vecX tmp) %{
  predicate(n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MulAddVS2VI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "smullv  $tmp, $src1, $src2\t# vector (4H)\n\t"
            "smullv  $dst, $src1, $src2\t# vector (8H)\n\t"
            "addpv   $dst, $tmp, $dst\t# vector (4S)\n\t" %}
  ins_encode %{
    __ smullv(as_FloatRegister($tmp$$reg), __ T4H,
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
    __ smullv(as_FloatRegister($dst$$reg), __ T8H,
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
    __ addpv(as_FloatRegister($dst$$reg), __ T4S,
             as_FloatRegister($tmp$$reg),
             as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17704 
17705 // --------------------------------- DIV --------------------------------------
17706 
// DivVF: packed float divide, 2 lanes in a D register.
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// DivVF: packed float divide, 4 lanes in an X register.
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// DivVD: packed double divide, 2 lanes in an X register.
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17748 
17749 // --------------------------------- SQRT -------------------------------------
17750 
// SqrtVF: packed float square root, 2 lanes in a D register.
instruct vsqrt2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVF src));
  format %{ "fsqrt  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// SqrtVF: packed float square root, 4 lanes in an X register.
instruct vsqrt4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SqrtVF src));
  format %{ "fsqrt  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}

// SqrtVD: packed double square root, 2 lanes in an X register.
instruct vsqrt2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVD src));
  format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}
17784 
17785 // --------------------------------- ABS --------------------------------------
17786 
// AbsVB: packed byte absolute value, 4 or 8 lanes in a D register.
instruct vabs8B(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AbsVB src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical64);
%}

// AbsVB: packed byte absolute value, 16 lanes in an X register.
instruct vabs16B(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AbsVB src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

// AbsVS: packed short absolute value, 4 lanes in a D register.
instruct vabs4S(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVS src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (4H)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical64);
%}

// AbsVS: packed short absolute value, 8 lanes in an X register.
instruct vabs8S(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AbsVS src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (8H)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

// AbsVI: packed int absolute value, 2 lanes in a D register.
instruct vabs2I(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVI src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (2S)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical64);
%}

// AbsVI: packed int absolute value, 4 lanes in an X register.
instruct vabs4I(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVI src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

// AbsVL: packed long absolute value, 2 lanes in an X register.
instruct vabs2L(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVL src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

// AbsVF: packed float absolute value, 2 lanes in a D register.
instruct vabs2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// AbsVF: packed float absolute value, 4 lanes in an X register.
instruct vabs4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// AbsVD: packed double absolute value, 2 lanes in an X register.
instruct vabs2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
17910 
17911 // --------------------------------- NEG --------------------------------------
17912 
// NegVF: packed float negate, 2 lanes in a D register.
instruct vneg2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// NegVF: packed float negate, 4 lanes in an X register.
instruct vneg4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// NegVD: packed double negate, 2 lanes in an X register.
instruct vneg2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
17951 
17952 // --------------------------------- AND --------------------------------------
17953 
// AndV: bitwise AND; element type is irrelevant, so the predicate selects
// on total vector width in bytes (4 or 8 -> D register).
instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// AndV: bitwise AND, 16-byte X register.
instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17982 
17983 // --------------------------------- OR ---------------------------------------
17984 
// OrV: bitwise OR; element type is irrelevant, so the predicate selects
// on total vector width in bytes (4 or 8 -> D register).
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  // The format mnemonic previously said "and" (copy-paste from the AndV
  // rules); the emitted instruction is orr, so print orr.
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src1$$reg),
           as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
17999 
// OrV: bitwise OR, 16-byte X register.
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
18013 
18014 // --------------------------------- XOR --------------------------------------
18015 
// XorV: bitwise XOR (emitted as eor); predicate selects on total vector
// width in bytes (4 or 8 -> D register).
instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// XorV: bitwise XOR, 16-byte X register.
instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
18044 
18045 // ------------------------------ Shift ---------------------------------------
// LShiftCntV/RShiftCntV: broadcast a general-purpose shift count into
// every byte lane of a D register for use by the variable vector shifts.
instruct vshiftcnt8B(vecD dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// Shift-count broadcast into every byte lane of an X register.
instruct vshiftcnt16B(vecX dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
18067 
// LShiftVB with a vector shift count: sshl shifts each byte lane left by
// the count in the corresponding lane of $shift. 4 or 8 lanes, D register.
instruct vsll8B(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// LShiftVB with a vector shift count, 16 byte lanes, X register.
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
18094 
18095 // Right shifts with vector shift count on aarch64 SIMD are implemented
18096 // as left shift by negative shift count.
18097 // There are two cases for vector shift count.
18098 //
18099 // Case 1: The vector shift count is from replication.
18100 //        |            |
18101 //    LoadVector  RShiftCntV
18102 //        |       /
18103 //     RShiftVI
18104 // Note: In inner loop, multiple neg instructions are used, which can be
18105 // moved to outer loop and merge into one neg instruction.
18106 //
18107 // Case 2: The vector shift count is from loading.
18108 // This case isn't supported by middle-end now. But it's supported by
18109 // panama/vectorIntrinsics(JEP 338: Vector API).
18110 //        |            |
18111 //    LoadVector  LoadVector
18112 //        |       /
18113 //     RShiftVI
18114 //
18115 
// RShiftVB with a vector shift count: per the comment block above, a right
// shift is done as a left shift (sshl) by the negated count; $tmp holds
// the negated shift vector.
instruct vsra8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// Signed right shift by vector count, 16 byte lanes, X register.
instruct vsra16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

// URShiftVB with a vector shift count: unsigned right shift done as ushl
// by the negated count.
instruct vsrl8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// Unsigned right shift by vector count, 16 byte lanes, X register.
instruct vsrl16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
18185 
// LShiftVB by an immediate count. A count >= 8 would be an illegal shl
// immediate, and Java semantics make the result all zeroes, so the dst is
// zeroed with eor(dst, src, src) in that case.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate left shift, 16 byte lanes; same >= 8 zeroing as above.
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

// RShiftVB by an immediate count. A signed shift >= 8 is clamped to 7,
// which replicates the sign bit into every bit of the lane.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate signed right shift, 16 byte lanes; clamped to 7 as above.
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}

// URShiftVB by an immediate count. An unsigned shift >= 8 yields zero,
// so the dst is zeroed with eor(dst, src, src) in that case.
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate unsigned right shift, 16 byte lanes; same >= 8 zeroing.
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
18292 
18293 instruct vsll4S(vecD dst, vecD src, vecD shift) %{
18294   predicate(n->as_Vector()->length() == 2 ||
18295             n->as_Vector()->length() == 4);
18296   match(Set dst (LShiftVS src shift));
18297   ins_cost(INSN_COST);
18298   format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
18299   ins_encode %{
18300     __ sshl(as_FloatRegister($dst$$reg), __ T4H,
18301             as_FloatRegister($src$$reg),
18302             as_FloatRegister($shift$$reg));
18303   %}
18304   ins_pipe(vshift64);
18305 %}
18306 
// Vector shift-left of 8 shorts by a per-lane variable count.
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    // SSHL shifts left for positive per-lane counts; for a left shift,
    // signed vs. unsigned variants are equivalent.
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
18319 
// Vector arithmetic shift-right of 2 or 4 shorts by a variable count.
// AArch64 has no right-shift-by-register: negate the counts and use SSHL
// (a negative per-lane count shifts right).
instruct vsra4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    // NEG is done byte-wise (T8B): SSHL reads only the low byte of each
    // lane as the (signed) shift count, so negating bytes is sufficient.
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
18337 
// Vector arithmetic shift-right of 8 shorts by a variable count.
// Right shift is done as SSHL by the negated counts (see vsra4S).
instruct vsra8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    // Byte-wise NEG: SSHL uses only the low byte of each lane as count.
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
18354 
// Vector logical shift-right of 2 or 4 shorts by a variable count,
// implemented as USHL by the negated counts (negative count == right shift).
instruct vsrl4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    // Byte-wise NEG: USHL uses only the low byte of each lane as count.
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
18372 
// Vector logical shift-right of 8 shorts by a variable count,
// implemented as USHL by the negated counts (negative count == right shift).
instruct vsrl8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    // Byte-wise NEG: USHL uses only the low byte of each lane as count.
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
18389 
// Vector shift-left of 2 or 4 shorts by an immediate count.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // A count >= the element width is not encodable by SHL; the result
      // must be zero, produced here by eor(dst, src, src).
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
18409 
// Vector shift-left of 8 shorts by an immediate count.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Count >= element width: result is zero (eor dst, src, src), since
      // such a count cannot be encoded in SHL.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
18428 
// Vector arithmetic shift-right of 2 or 4 shorts by an immediate count.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // An arithmetic shift by >= 16 must yield all sign bits; clamping the
    // count to 15 produces exactly that while staying encodable.
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
18443 
// Vector arithmetic shift-right of 8 shorts by an immediate count.
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // Shifts by >= 16 must yield all sign bits; clamp to 15 (see vsra4S_imm).
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
18457 
// Vector logical shift-right of 2 or 4 shorts by an immediate count.
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Count >= element width: result is zero (eor dst, src, src), since
      // such a count cannot be encoded in USHR.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
18477 
// Vector logical shift-right of 8 shorts by an immediate count.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Count >= element width: result is zero (eor dst, src, src), since
      // such a count cannot be encoded in USHR.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
18496 
// Vector shift-left of 2 ints by a per-lane variable count.
instruct vsll2I(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    // SSHL shifts left for positive counts; signedness is irrelevant
    // for a left shift.
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
18509 
// Vector shift-left of 4 ints by a per-lane variable count.
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    // SSHL shifts left for positive counts; signedness is irrelevant
    // for a left shift.
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
18522 
// Vector arithmetic shift-right of 2 ints by a variable count,
// implemented as SSHL by the negated counts (negative count == right shift).
instruct vsra2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    // Byte-wise NEG: SSHL uses only the low byte of each lane as count.
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
18539 
// Vector arithmetic shift-right of 4 ints by a variable count,
// implemented as SSHL by the negated counts (negative count == right shift).
instruct vsra4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    // Byte-wise NEG: SSHL uses only the low byte of each lane as count.
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
18556 
// Vector logical shift-right of 2 ints by a variable count,
// implemented as USHL by the negated counts (negative count == right shift).
instruct vsrl2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    // Byte-wise NEG: USHL uses only the low byte of each lane as count.
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
18573 
// Vector logical shift-right of 4 ints by a variable count,
// implemented as USHL by the negated counts (negative count == right shift).
instruct vsrl4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    // Byte-wise NEG: USHL uses only the low byte of each lane as count.
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
18590 
// Vector shift-left of 2 ints by an immediate count.
// NOTE(review): no >= 32 guard here (cf. the byte/short variants) — assumes
// the constant is already in encodable range; confirm upstream masking.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
18603 
// Vector shift-left of 4 ints by an immediate count.
// NOTE(review): no >= 32 guard here (cf. the byte/short variants) — assumes
// the constant is already in encodable range; confirm upstream masking.
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18616 
// Vector arithmetic shift-right of 2 ints by an immediate count.
// NOTE(review): no clamp as in the short variants — assumes the constant is
// already in encodable range; confirm upstream masking.
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
18629 
// Vector arithmetic shift-right of 4 ints by an immediate count.
// NOTE(review): no clamp as in the short variants — assumes the constant is
// already in encodable range; confirm upstream masking.
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18642 
// Vector logical shift-right of 2 ints by an immediate count.
// NOTE(review): no >= 32 guard here (cf. the byte/short variants) — assumes
// the constant is already in encodable range; confirm upstream masking.
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
18655 
// Vector logical shift-right of 4 ints by an immediate count.
// NOTE(review): no >= 32 guard here (cf. the byte/short variants) — assumes
// the constant is already in encodable range; confirm upstream masking.
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18668 
// Vector shift-left of 2 longs by a per-lane variable count.
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    // SSHL shifts left for positive counts; signedness is irrelevant
    // for a left shift.
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
18681 
// Vector arithmetic shift-right of 2 longs by a variable count,
// implemented as SSHL by the negated counts (negative count == right shift).
instruct vsra2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    // Byte-wise NEG: SSHL uses only the low byte of each lane as count.
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
18698 
// Vector logical shift-right of 2 longs by a variable count,
// implemented as USHL by the negated counts (negative count == right shift).
instruct vsrl2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    // Byte-wise NEG: USHL uses only the low byte of each lane as count.
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
18715 
// Vector shift-left of 2 longs by an immediate count.
// NOTE(review): no >= 64 guard here (cf. the byte/short variants) — assumes
// the constant is already in encodable range; confirm upstream masking.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18728 
// Vector arithmetic shift-right of 2 longs by an immediate count.
// NOTE(review): no clamp as in the short variants — assumes the constant is
// already in encodable range; confirm upstream masking.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18741 
// Vector logical shift-right of 2 longs by an immediate count.
// NOTE(review): no >= 64 guard here (cf. the byte/short variants) — assumes
// the constant is already in encodable range; confirm upstream masking.
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18754 
// Vector max of 2 floats, using the IEEE-754 FMAX instruction.
instruct vmax2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (2F)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
18768 
// Vector max of 4 floats, using the IEEE-754 FMAX instruction.
instruct vmax4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
18782 
// Vector max of 2 doubles, using the IEEE-754 FMAX instruction.
instruct vmax2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
18796 
// Vector min of 2 floats, using the IEEE-754 FMIN instruction.
instruct vmin2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (2F)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
18810 
// Vector min of 4 floats, using the IEEE-754 FMIN instruction.
instruct vmin4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
18824 
// Vector min of 2 doubles, using the IEEE-754 FMIN instruction.
instruct vmin2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
18838 
18839 instruct vround2D_reg(vecX dst, vecX src, immI rmode) %{
18840   predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
18841   match(Set dst (RoundDoubleModeV src rmode));
18842   format %{ "frint  $dst, $src, $rmode" %}
18843   ins_encode %{
18844     switch ($rmode$$constant) {
18845       case RoundDoubleModeNode::rmode_rint:
18846         __ frintn(as_FloatRegister($dst$$reg), __ T2D,
18847                   as_FloatRegister($src$$reg));
18848         break;
18849       case RoundDoubleModeNode::rmode_floor:
18850         __ frintm(as_FloatRegister($dst$$reg), __ T2D,
18851                   as_FloatRegister($src$$reg));
18852         break;
18853       case RoundDoubleModeNode::rmode_ceil:
18854         __ frintp(as_FloatRegister($dst$$reg), __ T2D,
18855                   as_FloatRegister($src$$reg));
18856         break;
18857     }
18858   %}
18859   ins_pipe(vdop_fp128);
18860 %}
18861 
// Per-lane population count for 4 ints: CNT gives per-byte bit counts,
// then two pairwise widening adds (UADDLP) fold bytes -> halfwords -> words.
instruct vpopcount4I(vecX dst, vecX src) %{
  predicate(UsePopCountInstruction && n->as_Vector()->length() == 4);
  match(Set dst (PopCountVI src));
  format %{
    "cnt     $dst, $src\t# vector (16B)\n\t"
    "uaddlp  $dst, $dst\t# vector (16B)\n\t"
    "uaddlp  $dst, $dst\t# vector (8H)"
  %}
  ins_encode %{
     // Bit count per byte.
     __ cnt(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg));
     // Pairwise add bytes into halfwords, then halfwords into words.
     __ uaddlp(as_FloatRegister($dst$$reg), __ T16B,
               as_FloatRegister($dst$$reg));
     __ uaddlp(as_FloatRegister($dst$$reg), __ T8H,
               as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
18880 
// Per-lane population count for 2 ints: CNT gives per-byte bit counts,
// then two pairwise widening adds (UADDLP) fold bytes -> halfwords -> words.
instruct vpopcount2I(vecD dst, vecD src) %{
  predicate(UsePopCountInstruction && n->as_Vector()->length() == 2);
  match(Set dst (PopCountVI src));
  format %{
    "cnt     $dst, $src\t# vector (8B)\n\t"
    "uaddlp  $dst, $dst\t# vector (8B)\n\t"
    "uaddlp  $dst, $dst\t# vector (4H)"
  %}
  ins_encode %{
     // Bit count per byte.
     __ cnt(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg));
     // Pairwise add bytes into halfwords, then halfwords into words.
     __ uaddlp(as_FloatRegister($dst$$reg), __ T8B,
               as_FloatRegister($dst$$reg));
     __ uaddlp(as_FloatRegister($dst$$reg), __ T4H,
               as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
18899 
18900 //----------PEEPHOLE RULES-----------------------------------------------------
18901 // These must follow all instruction definitions as they use the names
18902 // defined in the instructions definitions.
18903 //
18904 // peepmatch ( root_instr_name [preceding_instruction]* );
18905 //
18906 // peepconstraint %{
18907 // (instruction_number.operand_name relational_op instruction_number.operand_name
18908 //  [, ...] );
18909 // // instruction numbers are zero-based using left to right order in peepmatch
18910 //
18911 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
18912 // // provide an instruction_number.operand_name for each operand that appears
18913 // // in the replacement instruction's match rule
18914 //
18915 // ---------VM FLAGS---------------------------------------------------------
18916 //
18917 // All peephole optimizations can be turned off using -XX:-OptoPeephole
18918 //
18919 // Each peephole rule is given an identifying number starting with zero and
18920 // increasing by one in the order seen by the parser.  An individual peephole
18921 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
18922 // on the command-line.
18923 //
18924 // ---------CURRENT LIMITATIONS----------------------------------------------
18925 //
18926 // Only match adjacent instructions in same basic block
18927 // Only equality constraints
18928 // Only constraints between operands, not (0.dest_reg == RAX_enc)
18929 // Only one replacement instruction
18930 //
18931 // ---------EXAMPLE----------------------------------------------------------
18932 //
18933 // // pertinent parts of existing instructions in architecture description
18934 // instruct movI(iRegINoSp dst, iRegI src)
18935 // %{
18936 //   match(Set dst (CopyI src));
18937 // %}
18938 //
18939 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
18940 // %{
18941 //   match(Set dst (AddI dst src));
18942 //   effect(KILL cr);
18943 // %}
18944 //
18945 // // Change (inc mov) to lea
18946 // peephole %{
//   // increment preceded by register-register move
18948 //   peepmatch ( incI_iReg movI );
18949 //   // require that the destination register of the increment
18950 //   // match the destination register of the move
18951 //   peepconstraint ( 0.dst == 1.dst );
18952 //   // construct a replacement instruction that sets
18953 //   // the destination to ( move's source register + one )
18954 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
18955 // %}
18956 //
18957 
18958 // Implementation no longer uses movX instructions since
18959 // machine-independent system no longer uses CopyX nodes.
18960 //
18961 // peephole
18962 // %{
18963 //   peepmatch (incI_iReg movI);
18964 //   peepconstraint (0.dst == 1.dst);
18965 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18966 // %}
18967 
18968 // peephole
18969 // %{
18970 //   peepmatch (decI_iReg movI);
18971 //   peepconstraint (0.dst == 1.dst);
18972 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18973 // %}
18974 
18975 // peephole
18976 // %{
18977 //   peepmatch (addI_iReg_imm movI);
18978 //   peepconstraint (0.dst == 1.dst);
18979 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18980 // %}
18981 
18982 // peephole
18983 // %{
18984 //   peepmatch (incL_iReg movL);
18985 //   peepconstraint (0.dst == 1.dst);
18986 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18987 // %}
18988 
18989 // peephole
18990 // %{
18991 //   peepmatch (decL_iReg movL);
18992 //   peepconstraint (0.dst == 1.dst);
18993 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18994 // %}
18995 
18996 // peephole
18997 // %{
18998 //   peepmatch (addL_iReg_imm movL);
18999 //   peepconstraint (0.dst == 1.dst);
19000 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
19001 // %}
19002 
19003 // peephole
19004 // %{
19005 //   peepmatch (addP_iReg_imm movP);
19006 //   peepconstraint (0.dst == 1.dst);
19007 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
19008 // %}
19009 
19010 // // Change load of spilled value to only a spill
19011 // instruct storeI(memory mem, iRegI src)
19012 // %{
19013 //   match(Set mem (StoreI mem src));
19014 // %}
19015 //
19016 // instruct loadI(iRegINoSp dst, memory mem)
19017 // %{
19018 //   match(Set dst (LoadI mem));
19019 // %}
19020 //
19021 
19022 //----------SMARTSPILL RULES---------------------------------------------------
19023 // These must follow all instruction definitions as they use the names
19024 // defined in the instructions definitions.
19025 
19026 // Local Variables:
19027 // mode: c++
19028 // End: