src/hotspot/cpu/aarch64/aarch64.ad

   52 //
   53 // AS  = Always-Save:   The register allocator assumes that these registers
   54 //                      must be saved before using them upon entry to the
   55 //                      method, & that they must be saved at call sites.
   56 //
   57 // Ideal Register Type is used to determine how to save & restore a
   58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
   59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
   60 //
   61 // The encoding number is the actual bit-pattern placed into the opcodes.
   62 
   63 // We must define the 64 bit int registers in two 32 bit halves, the
   64 // real lower register and a virtual upper half register. upper halves
   65 // are used by the register allocator but are not actually supplied as
   66 // operands to memory ops.
   67 //
   68 // follow the C1 compiler in making registers
   69 //
   70 //   r0-r7,r10-r26 volatile (caller save)
   71 //   r27-r31 system (no save, no allocate)
   72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
   73 //
   74 // as regards Java usage. we don't use any callee save registers
   75 // because this makes it difficult to de-optimise a frame (see comment
   76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
   77 //
   78 
   79 // General Registers
   80 
   81 reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
   82 reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
   83 reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
   84 reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
   85 reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
   86 reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
   87 reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
   88 reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
   89 reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
   90 reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
   91 reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
   92 reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
   93 reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
   94 reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
   95 reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
   96 reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
   97 reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
   98 reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
   99 reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
  100 reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
  101 reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
  102 reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
  103 reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
  104 reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
  105 reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
  106 reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
  107 reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
  108 reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
  109 reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
  110 reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
  111 reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
  112 reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
  113 reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
  114 reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
  115 reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
  116 reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());

  123 reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
  124 reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
  125 reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
  126 reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
  127 reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
  128 reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
  129 reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
  130 reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
  131 reg_def R27     ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
  132 reg_def R27_H   ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()->next());
  133 reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
  134 reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
  135 reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
  136 reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
  137 reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
  138 reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
  139 reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
  140 reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
  141 
  142 // ----------------------------
  143 // Float/Double Registers
  144 // ----------------------------
  145 
  146 // Double Registers
  147 
  148 // The rules of ADL require that double registers be defined in pairs.
  149 // Each pair must be two 32-bit values, but not necessarily a pair of
  150 // single float registers. In each pair, ADLC-assigned register numbers
  151 // must be adjacent, with the lower number even. Finally, when the
  152 // CPU stores such a register pair to memory, the word associated with
  153 // the lower ADLC-assigned number must be stored to the lower address.
  154 
  155 // AArch64 has 32 floating-point registers. Each can store a vector of
  156 // single or double precision floating-point values: up to 4 * 32 bit
  157 // floats or 2 * 64 bit doubles per 128-bit register.  We currently only
  158 // use the first float or double element of the vector.
  159 
  160 // for Java use, float registers v0-v15 are always save-on-call (whereas
  161 // the platform ABI treats v8-v15 as callee-save). float registers
  162 // v16-v31 are SOC as per the platform spec
  163 
  164   reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  165   reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  166   reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  167   reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );
  168 
  169   reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  170   reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  171   reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  172   reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );
  173 
  174   reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  175   reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  176   reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  177   reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );
  178 
  179   reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  180   reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  181   reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  182   reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );
  183 
  184   reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  185   reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  186   reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  187   reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );
  188 
  189   reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  190   reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  191   reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  192   reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );
  193 
  194   reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  195   reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  196   reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  197   reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );
  198 
  199   reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  200   reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  201   reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  202   reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );
  203 
  204   reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  205   reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  206   reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  207   reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );
  208 
  209   reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  210   reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  211   reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  212   reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );
  213 
  214   reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  215   reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  216   reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  217   reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));
  218 
  219   reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  220   reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  221   reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  222   reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));
  223 
  224   reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  225   reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  226   reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  227   reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));
  228 
  229   reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  230   reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  231   reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  232   reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));
  233 
  234   reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  235   reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  236   reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  237   reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));
  238 
  239   reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  240   reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  241   reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  242   reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));
  243 
  244   reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  245   reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  246   reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  247   reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));
  248 
  249   reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  250   reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  251   reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  252   reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));
  253 
  254   reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  255   reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  256   reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  257   reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));
  258 
  259   reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  260   reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  261   reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  262   reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));
  263 
  264   reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  265   reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  266   reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  267   reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));
  268 
  269   reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  270   reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  271   reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  272   reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));
  273 
  274   reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  275   reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  276   reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  277   reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));
  278 
  279   reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  280   reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  281   reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  282   reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));
  283 
  284   reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  285   reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  286   reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  287   reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));
  288 
  289   reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  290   reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  291   reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  292   reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));
  293 
  294   reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  295   reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  296   reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  297   reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));
  298 
  299   reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  300   reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  301   reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  302   reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));
  303 
  304   reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  305   reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  306   reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  307   reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));
  308 
  309   reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  310   reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  311   reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  312   reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));
  313 
  314   reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  315   reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  316   reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  317   reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));
  318 
  319   reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  320   reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  321   reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  322   reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
  323 
  324 // ----------------------------
  325 // Special Registers
  326 // ----------------------------
  327 
  328 // the AArch64 CPSR status flag register is not directly accessible as
  329 // an instruction operand. the FPSR status flag register is a system
  330 // register which can be written/read using MSR/MRS but again does not
  331 // appear as an operand (a code identifying the FPSR occurs as an
  332 // immediate value in the instruction).
  333 
  334 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
  335 
  336 
  337 // Specify priority of register selection within phases of register
  338 // allocation.  Highest priority is first.  A useful heuristic is to
  339 // give registers a low priority when they are required by machine
  340 // instructions, like EAX and EDX on I486, and choose no-save registers
  341 // before save-on-call, & save-on-call before save-on-entry.  Registers
  342 // which participate in fixed calling sequences should come last.
  343 // Registers which are used as pairs must fall on an even boundary.
  344 
  345 alloc_class chunk0(
  346     // volatiles
  347     R10, R10_H,
  348     R11, R11_H,
  349     R12, R12_H,
  350     R13, R13_H,
  351     R14, R14_H,
  352     R15, R15_H,
  353     R16, R16_H,
  354     R17, R17_H,
  355     R18, R18_H,
  356 

  364     R6, R6_H,
  365     R7, R7_H,
  366 
  367     // non-volatiles
  368     R19, R19_H,
  369     R20, R20_H,
  370     R21, R21_H,
  371     R22, R22_H,
  372     R23, R23_H,
  373     R24, R24_H,
  374     R25, R25_H,
  375     R26, R26_H,
  376 
  377     // non-allocatable registers
  378 
  379     R27, R27_H, // heapbase
  380     R28, R28_H, // thread
  381     R29, R29_H, // fp
  382     R30, R30_H, // lr
  383     R31, R31_H, // sp
  384 );
  385 
  386 alloc_class chunk1(
  387 
  388     // no save
  389     V16, V16_H, V16_J, V16_K,
  390     V17, V17_H, V17_J, V17_K,
  391     V18, V18_H, V18_J, V18_K,
  392     V19, V19_H, V19_J, V19_K,
  393     V20, V20_H, V20_J, V20_K,
  394     V21, V21_H, V21_J, V21_K,
  395     V22, V22_H, V22_J, V22_K,
  396     V23, V23_H, V23_J, V23_K,
  397     V24, V24_H, V24_J, V24_K,
  398     V25, V25_H, V25_J, V25_K,
  399     V26, V26_H, V26_J, V26_K,
  400     V27, V27_H, V27_J, V27_K,
  401     V28, V28_H, V28_J, V28_K,
  402     V29, V29_H, V29_J, V29_K,
  403     V30, V30_H, V30_J, V30_K,
  404     V31, V31_H, V31_J, V31_K,
  405 
  406     // arg registers
  407     V0, V0_H, V0_J, V0_K,
  408     V1, V1_H, V1_J, V1_K,
  409     V2, V2_H, V2_J, V2_K,
  410     V3, V3_H, V3_J, V3_K,
  411     V4, V4_H, V4_J, V4_K,
  412     V5, V5_H, V5_J, V5_K,
  413     V6, V6_H, V6_J, V6_K,
  414     V7, V7_H, V7_J, V7_K,
  415 
  416     // non-volatiles
  417     V8, V8_H, V8_J, V8_K,
  418     V9, V9_H, V9_J, V9_K,
  419     V10, V10_H, V10_J, V10_K,
  420     V11, V11_H, V11_J, V11_K,
  421     V12, V12_H, V12_J, V12_K,
  422     V13, V13_H, V13_J, V13_K,
  423     V14, V14_H, V14_J, V14_K,
  424     V15, V15_H, V15_J, V15_K,
  425 );
  426 
  427 alloc_class chunk2(RFLAGS);
  428 
  429 //----------Architecture Description Register Classes--------------------------
  430 // Several register classes are automatically defined based upon information in
  431 // this architecture description.
  432 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
  433 // 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
  434 // 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
  435 // 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
  436 //
  437 
  438 // Class for all 32 bit general purpose registers
  439 reg_class all_reg32(
  440     R0,
  441     R1,
  442     R2,
  443     R3,
  444     R4,
  445     R5,
  446     R6,
  447     R7,

  691     V14, V14_H,
  692     V15, V15_H,
  693     V16, V16_H,
  694     V17, V17_H,
  695     V18, V18_H,
  696     V19, V19_H,
  697     V20, V20_H,
  698     V21, V21_H,
  699     V22, V22_H,
  700     V23, V23_H,
  701     V24, V24_H,
  702     V25, V25_H,
  703     V26, V26_H,
  704     V27, V27_H,
  705     V28, V28_H,
  706     V29, V29_H,
  707     V30, V30_H,
  708     V31, V31_H
  709 );
  710 
  711 // Class for all 64 bit vector registers
  712 reg_class vectord_reg(
  713     V0, V0_H,
  714     V1, V1_H,
  715     V2, V2_H,
  716     V3, V3_H,
  717     V4, V4_H,
  718     V5, V5_H,
  719     V6, V6_H,
  720     V7, V7_H,
  721     V8, V8_H,
  722     V9, V9_H,
  723     V10, V10_H,
  724     V11, V11_H,
  725     V12, V12_H,
  726     V13, V13_H,
  727     V14, V14_H,
  728     V15, V15_H,
  729     V16, V16_H,
  730     V17, V17_H,

  923 // Class for 128 bit register v28
  924 reg_class v28_reg(
  925     V28, V28_H
  926 );
  927 
  928 // Class for 128 bit register v29
  929 reg_class v29_reg(
  930     V29, V29_H
  931 );
  932 
  933 // Class for 128 bit register v30
  934 reg_class v30_reg(
  935     V30, V30_H
  936 );
  937 
  938 // Class for 128 bit register v31
  939 reg_class v31_reg(
  940     V31, V31_H
  941 );
  942 
  943 // Singleton class for condition codes
  944 reg_class int_flags(RFLAGS);
  945 
  946 %}
  947 
  948 //----------DEFINITION BLOCK---------------------------------------------------
  949 // Define name --> value mappings to inform the ADLC of an integer valued name
  950 // Current support includes integer values in the range [0, 0x7FFFFFFF]
  951 // Format:
  952 //        int_def  <name>         ( <int_value>, <expression>);
  953 // Generated Code in ad_<arch>.hpp
  954 //        #define  <name>   (<expression>)
  955 //        // value == <int_value>
  956 // Generated code in ad_<arch>.cpp adlc_verification()
  957 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
  958 //
  959 
  960 // we follow the ppc-aix port in using a simple cost model which ranks
  961 // register operations as cheap, memory ops as more expensive and
  962 // branches as most expensive. the first two have a low as well as a

 1725 }
 1726 
 1727 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
 1728   // Variable size. Determine dynamically.
 1729   return MachNode::size(ra_);
 1730 }
 1731 
 1732 int MachEpilogNode::reloc() const {
 1733   // Return number of relocatable values contained in this instruction.
 1734   return 1; // 1 for polling page.
 1735 }
 1736 
 1737 const Pipeline * MachEpilogNode::pipeline() const {
 1738   return MachNode::pipeline_class();
 1739 }
 1740 
 1741 //=============================================================================
 1742 
 1743 // Figure out which register class each belongs in: rc_int, rc_float or
 1744 // rc_stack.
 1745 enum RC { rc_bad, rc_int, rc_float, rc_stack };
 1746 
 1747 static enum RC rc_class(OptoReg::Name reg) {
 1748 
 1749   if (reg == OptoReg::Bad) {
 1750     return rc_bad;
 1751   }
 1752 
 1753   // we have 30 int registers * 2 halves
 1754   // (rscratch1 and rscratch2 are omitted)
 1755   int slots_of_int_registers = RegisterImpl::max_slots_per_register * (RegisterImpl::number_of_registers - 2);
 1756 
 1757   if (reg < slots_of_int_registers) {
 1758     return rc_int;
 1759   }
 1760 
 1761   // we have 32 float registers * 4 halves
 1762   if (reg < slots_of_int_registers + FloatRegisterImpl::max_slots_per_register * FloatRegisterImpl::number_of_registers) {
 1763     return rc_float;
 1764   }
 1765 
 1766   // Between float regs & stack are the flags regs.
 1767   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
 1768 
 1769   return rc_stack;
 1770 }
 1771 
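A worked example of the slot arithmetic above (a sketch, assuming 2 OptoReg slots per general-purpose register and 4 per float register, matching the reg_def blocks earlier in this file):

    slots_of_int_registers = 2 * (32 - 2) = 60     // OptoReg names  0..59  -> rc_int
    slots of float regs    = 4 * 32       = 128    // OptoReg names 60..187 -> rc_float
    // RFLAGS comes after the float slots; any OptoReg name beyond it is rc_stack.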
 1772 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
 1773   Compile* C = ra_->C;
 1774 
 1775   // Get registers to move.
 1776   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
 1777   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
 1778   OptoReg::Name dst_hi = ra_->get_reg_second(this);
 1779   OptoReg::Name dst_lo = ra_->get_reg_first(this);
 1780 
 1781   enum RC src_hi_rc = rc_class(src_hi);
 1782   enum RC src_lo_rc = rc_class(src_lo);
 1783   enum RC dst_hi_rc = rc_class(dst_hi);
 1784   enum RC dst_lo_rc = rc_class(dst_lo);
 1785 
 1786   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
 1787 
 1788   if (src_hi != OptoReg::Bad) {
 1789     assert((src_lo&1)==0 && src_lo+1==src_hi &&
 1790            (dst_lo&1)==0 && dst_lo+1==dst_hi,
 1791            "expected aligned-adjacent pairs");
 1792   }
 1793 
 1794   if (src_lo == dst_lo && src_hi == dst_hi) {
 1795     return 0;            // Self copy, no move.
 1796   }
 1797 
 1798   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
 1799               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
 1800   int src_offset = ra_->reg2offset(src_lo);
 1801   int dst_offset = ra_->reg2offset(dst_lo);
 1802 
 1803   if (bottom_type()->isa_vect() != NULL) {
 1804     uint ireg = ideal_reg();
 1805     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
 1806     if (cbuf) {
 1807       C2_MacroAssembler _masm(cbuf);
 1808       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
 1809       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
 1810         // stack->stack
 1811         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
 1812         if (ireg == Op_VecD) {
 1813           __ unspill(rscratch1, true, src_offset);
 1814           __ spill(rscratch1, true, dst_offset);
 1815         } else {
 1816           __ spill_copy128(src_offset, dst_offset);
 1817         }
 1818       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
 1819         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 1820                ireg == Op_VecD ? __ T8B : __ T16B,
 1821                as_FloatRegister(Matcher::_regEncode[src_lo]));
 1822       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
 1823         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
 1824                        ireg == Op_VecD ? __ D : __ Q,
 1825                        ra_->reg2offset(dst_lo));
 1826       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
 1827         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 1828                        ireg == Op_VecD ? __ D : __ Q,
 1829                        ra_->reg2offset(src_lo));
 1830       } else {
 1831         ShouldNotReachHere();
 1832       }
 1833     }
 1834   } else if (cbuf) {
 1835     C2_MacroAssembler _masm(cbuf);
 1836     switch (src_lo_rc) {
 1837     case rc_int:
 1838       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
 1839         if (is64) {
 1840             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
 1841                    as_Register(Matcher::_regEncode[src_lo]));
 1842         } else {
 1843             C2_MacroAssembler _masm(cbuf);
 1844             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
 1845                     as_Register(Matcher::_regEncode[src_lo]));
 1846         }
 1847       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
 1848         if (is64) {
 1849             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),

 1894       break;
 1895     default:
 1896       assert(false, "bad rc_class for spill");
 1897       ShouldNotReachHere();
 1898     }
 1899   }
 1900 
 1901   if (st) {
 1902     st->print("spill ");
 1903     if (src_lo_rc == rc_stack) {
 1904       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
 1905     } else {
 1906       st->print("%s -> ", Matcher::regName[src_lo]);
 1907     }
 1908     if (dst_lo_rc == rc_stack) {
 1909       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
 1910     } else {
 1911       st->print("%s", Matcher::regName[dst_lo]);
 1912     }
 1913     if (bottom_type()->isa_vect() != NULL) {
 1914       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
 1915     } else {
 1916       st->print("\t# spill size = %d", is64 ? 64:32);
 1917     }
 1918   }
 1919 
 1920   return 0;
 1921 
 1922 }
 1923 
 1924 #ifndef PRODUCT
 1925 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1926   if (!ra_)
 1927     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
 1928   else
 1929     implementation(NULL, ra_, false, st);
 1930 }
 1931 #endif
 1932 
 1933 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
 1934   implementation(&cbuf, ra_, false, NULL);
 1935 }
 1936 
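Note the idiom above: the single implementation() routine backs all of these entry points, and the mode is selected by which arguments are non-NULL (a restatement of the calls visible above, not new code):

    implementation(&cbuf, ra_, false, NULL);   // emit: assemble the spill copy into cbuf
    implementation(NULL,  ra_, false, st);     // format: print the spill description to st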

 2080 // other intrinsics guarded with vector length (vlen) and element type (bt).
 2081 const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
 2082   if (!match_rule_supported(opcode)) {
 2083     return false;
 2084   }
 2085 
 2086   // Special cases which require vector length
 2087   switch (opcode) {
 2088     case Op_MulAddVS2VI: {
 2089       if (vlen != 4) {
 2090         return false;
 2091       }
 2092       break;
 2093     }
 2094   }
 2095 
 2096   return true; // Per default match rules are supported.
 2097 }
 2098 
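For example, the vlen guard above rejects MulAddVS2VI for any vector length other than 4 (a sketch; the T_SHORT element type is an illustrative assumption):

    Matcher::match_rule_supported_vector(Op_MulAddVS2VI, 4, T_SHORT);  // true iff the base rule is supported
    Matcher::match_rule_supported_vector(Op_MulAddVS2VI, 2, T_SHORT);  // false: vlen != 4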
 2099 const bool Matcher::has_predicated_vectors(void) {
 2100   return false;
 2101 }
 2102 
 2103 const int Matcher::float_pressure(int default_pressure_threshold) {
 2104   return default_pressure_threshold;
 2105 }
 2106 
 2107 int Matcher::regnum_to_fpu_offset(int regnum)
 2108 {
 2109   Unimplemented();
 2110   return 0;
 2111 }
 2112 
 2113 // Is this branch offset short enough that a short branch can be used?
 2114 //
 2115 // NOTE: If the platform does not provide any short branch variants, then
 2116 //       this method should return false for offset 0.
 2117 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
 2118   // The passed offset is relative to address of the branch.
 2119 
 2120   return (-32768 <= offset && offset < 32768);
 2121 }
 2122 
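A sketch of where the +/-32768 window comes from (an ISA-level assumption, not stated in this file): the most restrictive AArch64 conditional branches, tbz/tbnz, encode a signed 14-bit word offset, giving a reach of

    2^13 words * 4 bytes/word = 32768 bytes in each direction,

so any offset inside the window is reachable by every short branch form.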
 2123 const bool Matcher::isSimpleConstant64(jlong value) {
 2124   // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?
 2125   // Probably always true, even if a temp register is required.
 2126   return true;
 2127 }
 2128 
 2129 // true just means we have fast l2f conversion
 2130 const bool Matcher::convL2FSupported(void) {
 2131   return true;
 2132 }
 2133 
 2134 // Vector width in bytes.
 2135 const int Matcher::vector_width_in_bytes(BasicType bt) {
 2136   int size = MIN2(16,(int)MaxVectorSize);
 2137   // Minimum 2 values in vector
 2138   if (size < 2*type2aelembytes(bt)) size = 0;
 2139   // But never < 4
 2140   if (size < 4) size = 0;
 2141   return size;
 2142 }
 2143 
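Worked examples of the clamping above (a sketch; the MaxVectorSize values are illustrative):

    // MaxVectorSize = 16, bt = T_INT:    size = 16; 2*4 = 8  <= 16  -> 16 bytes (4 ints)
    // MaxVectorSize = 16, bt = T_DOUBLE: size = 16; 2*8 = 16 <= 16  -> 16 bytes (2 doubles)
    // MaxVectorSize = 4,  bt = T_INT:    size = 4;  2*4 = 8  >  4   -> 0 (no vectorization)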
 2144 // Limits on vector size (number of elements) loaded into vector.
 2145 const int Matcher::max_vector_size(const BasicType bt) {
 2146   return vector_width_in_bytes(bt)/type2aelembytes(bt);
 2147 }
 2148 const int Matcher::min_vector_size(const BasicType bt) {
 2149 //  For the moment limit the vector size to 8 bytes
 2150     int size = 8 / type2aelembytes(bt);
 2151     if (size < 2) size = 2;
 2152     return size;
 2153 }
 2154 
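Putting the two limits together (a sketch, assuming a 16-byte vector width):

    // max_vector_size(T_INT)  = 16 / 4 = 4      min_vector_size(T_INT)  = 8 / 4 = 2
    // max_vector_size(T_BYTE) = 16 / 1 = 16     min_vector_size(T_BYTE) = 8 / 1 = 8
    // max_vector_size(T_LONG) = 16 / 8 = 2      min_vector_size(T_LONG) = 8 / 8 = 1, clamped to 2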
 2155 // Vector ideal reg.
 2156 const uint Matcher::vector_ideal_reg(int len) {
 2157   switch(len) {
 2158     case  8: return Op_VecD;
 2159     case 16: return Op_VecX;
 2160   }
 2161   ShouldNotReachHere();
 2162   return 0;
 2163 }
 2164 
 2165 // AES support not yet implemented
 2166 const bool Matcher::pass_original_key_for_aes() {
 2167   return false;
 2168 }
 2169 
 2170 // aarch64 supports misaligned vector store/load.
 2171 const bool Matcher::misaligned_vectors_ok() {
 2172   return true;
 2173 }
 2174 
 2175 // false => size gets scaled to BytesPerLong, ok.
 2176 const bool Matcher::init_array_count_is_in_bytes = false;

 4841   constraint(ALLOC_IN_RC(float_reg));
 4842   match(RegF);
 4843 
 4844   op_cost(0);
 4845   format %{ %}
 4846   interface(REG_INTER);
 4847 %}
 4848 
 4849 // Double Register
 4850 // Double register operands
 4851 operand vRegD()
 4852 %{
 4853   constraint(ALLOC_IN_RC(double_reg));
 4854   match(RegD);
 4855 
 4856   op_cost(0);
 4857   format %{ %}
 4858   interface(REG_INTER);
 4859 %}
 4860 
 4861 operand vecD()
 4862 %{
 4863   constraint(ALLOC_IN_RC(vectord_reg));
 4864   match(VecD);
 4865 
 4866   op_cost(0);
 4867   format %{ %}
 4868   interface(REG_INTER);
 4869 %}
 4870 
 4871 operand vecX()
 4872 %{
 4873   constraint(ALLOC_IN_RC(vectorx_reg));
 4874   match(VecX);
 4875 
 4876   op_cost(0);
 4877   format %{ %}
 4878   interface(REG_INTER);
 4879 %}
 4880 

 5149 %}
 5150 
 5151 operand vRegD_V30()
 5152 %{
 5153   constraint(ALLOC_IN_RC(v30_reg));
 5154   match(RegD);
 5155   op_cost(0);
 5156   format %{ %}
 5157   interface(REG_INTER);
 5158 %}
 5159 
 5160 operand vRegD_V31()
 5161 %{
 5162   constraint(ALLOC_IN_RC(v31_reg));
 5163   match(RegD);
 5164   op_cost(0);
 5165   format %{ %}
 5166   interface(REG_INTER);
 5167 %}
 5168 
 5169 // Flags register, used as output of signed compare instructions
 5170 
 5171 // note that on AArch64 we also use this register as the output for
 5172 // floating point compare instructions (CmpF CmpD). this ensures that
 5173 // ordered inequality tests use GT, GE, LT or LE, none of which pass
 5174 // through cases where the result is unordered, i.e. where one or both
 5175 // inputs to the compare are NaN. this means that the ideal code can
 5176 // replace e.g. a GT with an LE and not end up capturing the NaN case
 5177 // (where the comparison should always fail). EQ and NE tests are
 5178 // always generated in ideal code so that unordered folds into the NE
 5179 // case, matching the behaviour of AArch64 NE.
 5180 //
 5181 // This differs from x86 where the outputs of FP compares use a
 5182 // special FP flags registers and where compares based on this
 5183 // register are distinguished into ordered inequalities (cmpOpUCF) and
 5184 // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
 5185 // to explicitly handle the unordered case in branches. x86 also has
 5186 // to include extra CMoveX rules to accept a cmpOpUCF input.
 5187 
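A small illustration of the unordered behaviour described above (NZCV values as defined for AArch64 fcmp; a sketch, not generated code):

    fcmp  s0, s1      // if either input is NaN: N=0, Z=0, C=1, V=1
    b.eq  Ltarget     // not taken when unordered: EQ requires Z == 1
    b.ne  Ltarget     // taken: unordered folds into NE, as noted above
    b.gt  Ltarget     // not taken when unordered: GT requires Z == 0 && N == V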
 5188 operand rFlagsReg()

src/hotspot/cpu/aarch64/aarch64.ad (new version)

   52 //
   53 // AS  = Always-Save:   The register allocator assumes that these registers
   54 //                      must be saved before using them upon entry to the
   55 //                      method, & that they must be saved at call sites.
   56 //
   57 // Ideal Register Type is used to determine how to save & restore a
   58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
   59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
   60 //
   61 // The encoding number is the actual bit-pattern placed into the opcodes.
   62 
   63 // We must define the 64 bit int registers in two 32 bit halves, the
   64 // real lower register and a virtual upper half register. upper halves
   65 // are used by the register allocator but are not actually supplied as
   66 // operands to memory ops.
   67 //
   68 // follow the C1 compiler in making registers
   69 //
   70 //   r0-r7,r10-r26 volatile (caller save)
   71 //   r27-r31 system (no save, no allocate)
   72 //   r8-r9 non-allocatable (so we can use them as scratch regs)
   73 //
   74 // as regards Java usage. we don't use any callee save registers
   75 // because this makes it difficult to de-optimise a frame (see comment
   76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
   77 //
   78 
   79 // General Registers
   80 
   81 reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
   82 reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
   83 reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
   84 reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
   85 reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
   86 reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
   87 reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
   88 reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
   89 reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
   90 reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
   91 reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
   92 reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
   93 reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
   94 reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
   95 reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
   96 reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
   97 reg_def R8      ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()         ); // rscratch1, non-allocatable
   98 reg_def R8_H    ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()->next() );
   99 reg_def R9      ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()         ); // rscratch2, non-allocatable
  100 reg_def R9_H    ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()->next() );
  101 reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
  102 reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
  103 reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
  104 reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
  105 reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
  106 reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
  107 reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
  108 reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
  109 reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
  110 reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
  111 reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
  112 reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
  113 reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
  114 reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
  115 reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
  116 reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
  117 reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
  118 reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
  119 reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
  120 reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());

  127 reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
  128 reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
  129 reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
  130 reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
  131 reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
  132 reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
  133 reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
  134 reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
  135 reg_def R27     ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
  136 reg_def R27_H   ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()->next());
  137 reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
  138 reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
  139 reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
  140 reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
  141 reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
  142 reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
  143 reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
  144 reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
  145 
  146 // ----------------------------
  147 // Float/Double/Vector Registers
  148 // ----------------------------
  149 
  150 // Double Registers
  151 
  152 // The rules of ADL require that double registers be defined in pairs.
  153 // Each pair must be two 32-bit values, but not necessarily a pair of
  154 // single float registers. In each pair, ADLC-assigned register numbers
  155 // must be adjacent, with the lower number even. Finally, when the
  156 // CPU stores such a register pair to memory, the word associated with
  157 // the lower ADLC-assigned number must be stored to the lower address.
  158 
  159 // AArch64 has 32 floating-point registers. Each can store a vector of
  160 // single or double precision floating-point values: up to 4 * 32 bit
  161 // floats or 2 * 64 bit doubles per 128-bit register.  We currently only
  162 // use the first float or double element of the vector.
  163 
  164 // for Java use, float registers v0-v15 are always save-on-call (whereas
  165 // the platform ABI treats v8-v15 as callee-save). float registers
  166 // v16-v31 are SOC as per the platform spec
  167 
  168 // For SVE vector registers, we simply extend the vector register size
  169 // to 8 slots. A vector register using only the lower 4 slots denotes a
  170 // 128-bit NEON vector register, while a vector register using all 8
  171 // slots denotes an SVE scalable vector register with a vector size >=
  172 // 128 bits (128 ~ 2048 bits, a multiple of 128 bits). A 128-bit SVE
  173 // vector register also has 8 slots, but its actual size is 128 bits,
  174 // the same as a NEON vector register. Since the real SVE vector
  175 // register size can be detected during JIT compilation, the register
  176 // allocator is able to do the right thing with the real register size,
  177 // e.g. for spilling/unspilling.
  178 
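For example, under this scheme the register allocator sees:

    V0, V0_H, V0_J, V0_K                            // a 128-bit NEON vector in v0 (4 slots)
    V0, V0_H, V0_J, V0_K, V0_L, V0_M, V0_N, V0_O    // an SVE scalable vector in v0 (8 slots)

whatever the actual hardware vector length turns out to be.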
  179   reg_def V0   ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()          );
  180   reg_def V0_H ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next()  );
  181   reg_def V0_J ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(2) );
  182   reg_def V0_K ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(3) );
  183   reg_def V0_L ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(4) );
  184   reg_def V0_M ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(5) );
  185   reg_def V0_N ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(6) );
  186   reg_def V0_O ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(7) );
  187 
  188   reg_def V1   ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()          );
  189   reg_def V1_H ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next()  );
  190   reg_def V1_J ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(2) );
  191   reg_def V1_K ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(3) );
  192   reg_def V1_L ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(4) );
  193   reg_def V1_M ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(5) );
  194   reg_def V1_N ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(6) );
  195   reg_def V1_O ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(7) );
  196 
  197   reg_def V2   ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()          );
  198   reg_def V2_H ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next()  );
  199   reg_def V2_J ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(2) );
  200   reg_def V2_K ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(3) );
  201   reg_def V2_L ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(4) );
  202   reg_def V2_M ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(5) );
  203   reg_def V2_N ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(6) );
  204   reg_def V2_O ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(7) );
  205 
  206   reg_def V3   ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()          );
  207   reg_def V3_H ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next()  );
  208   reg_def V3_J ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(2) );
  209   reg_def V3_K ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(3) );
  210   reg_def V3_L ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(4) );
  211   reg_def V3_M ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(5) );
  212   reg_def V3_N ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(6) );
  213   reg_def V3_O ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(7) );
  214 
  215   reg_def V4   ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()          );
  216   reg_def V4_H ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next()  );
  217   reg_def V4_J ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(2) );
  218   reg_def V4_K ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(3) );
  219   reg_def V4_L ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(4) );
  220   reg_def V4_M ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(5) );
  221   reg_def V4_N ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(6) );
  222   reg_def V4_O ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(7) );
  223 
  224   reg_def V5   ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()          );
  225   reg_def V5_H ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next()  );
  226   reg_def V5_J ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(2) );
  227   reg_def V5_K ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(3) );
  228   reg_def V5_L ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(4) );
  229   reg_def V5_M ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(5) );
  230   reg_def V5_N ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(6) );
  231   reg_def V5_O ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(7) );
  232 
  233   reg_def V6   ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()          );
  234   reg_def V6_H ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next()  );
  235   reg_def V6_J ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(2) );
  236   reg_def V6_K ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(3) );
  237   reg_def V6_L ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(4) );
  238   reg_def V6_M ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(5) );
  239   reg_def V6_N ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(6) );
  240   reg_def V6_O ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(7) );
  241 
  242   reg_def V7   ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()          );
  243   reg_def V7_H ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next()  );
  244   reg_def V7_J ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(2) );
  245   reg_def V7_K ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(3) );
  246   reg_def V7_L ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(4) );
  247   reg_def V7_M ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(5) );
  248   reg_def V7_N ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(6) );
  249   reg_def V7_O ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(7) );
  250 
  251   reg_def V8   ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()          );
  252   reg_def V8_H ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next()  );
  253   reg_def V8_J ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(2) );
  254   reg_def V8_K ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(3) );
  255   reg_def V8_L ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(4) );
  256   reg_def V8_M ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(5) );
  257   reg_def V8_N ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(6) );
  258   reg_def V8_O ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(7) );
  259 
  260   reg_def V9   ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()          );
  261   reg_def V9_H ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next()  );
  262   reg_def V9_J ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(2) );
  263   reg_def V9_K ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(3) );
  264   reg_def V9_L ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(4) );
  265   reg_def V9_M ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(5) );
  266   reg_def V9_N ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(6) );
  267   reg_def V9_O ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(7) );
  268 
  269   reg_def V10   ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()          );
  270   reg_def V10_H ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next()  );
  271   reg_def V10_J ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2) );
  272   reg_def V10_K ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3) );
  273   reg_def V10_L ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(4) );
  274   reg_def V10_M ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(5) );
  275   reg_def V10_N ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(6) );
  276   reg_def V10_O ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(7) );
  277 
  278   reg_def V11   ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()          );
  279   reg_def V11_H ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next()  );
  280   reg_def V11_J ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2) );
  281   reg_def V11_K ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3) );
  282   reg_def V11_L ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(4) );
  283   reg_def V11_M ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(5) );
  284   reg_def V11_N ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(6) );
  285   reg_def V11_O ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(7) );
  286 
  287   reg_def V12   ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()          );
  288   reg_def V12_H ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next()  );
  289   reg_def V12_J ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2) );
  290   reg_def V12_K ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3) );
  291   reg_def V12_L ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(4) );
  292   reg_def V12_M ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(5) );
  293   reg_def V12_N ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(6) );
  294   reg_def V12_O ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(7) );
  295 
  296   reg_def V13   ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()          );
  297   reg_def V13_H ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next()  );
  298   reg_def V13_J ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2) );
  299   reg_def V13_K ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3) );
  300   reg_def V13_L ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(4) );
  301   reg_def V13_M ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(5) );
  302   reg_def V13_N ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(6) );
  303   reg_def V13_O ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(7) );
  304 
  305   reg_def V14   ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()          );
  306   reg_def V14_H ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next()  );
  307   reg_def V14_J ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2) );
  308   reg_def V14_K ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3) );
  309   reg_def V14_L ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(4) );
  310   reg_def V14_M ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(5) );
  311   reg_def V14_N ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(6) );
  312   reg_def V14_O ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(7) );
  313 
  314   reg_def V15   ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()          );
  315   reg_def V15_H ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next()  );
  316   reg_def V15_J ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2) );
  317   reg_def V15_K ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3) );
  318   reg_def V15_L ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(4) );
  319   reg_def V15_M ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(5) );
  320   reg_def V15_N ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(6) );
  321   reg_def V15_O ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(7) );
  322 
  323   reg_def V16   ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()          );
  324   reg_def V16_H ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next()  );
  325   reg_def V16_J ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2) );
  326   reg_def V16_K ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3) );
  327   reg_def V16_L ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(4) );
  328   reg_def V16_M ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(5) );
  329   reg_def V16_N ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(6) );
  330   reg_def V16_O ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(7) );
  331 
  332   reg_def V17   ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()          );
  333   reg_def V17_H ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next()  );
  334   reg_def V17_J ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2) );
  335   reg_def V17_K ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3) );
  336   reg_def V17_L ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(4) );
  337   reg_def V17_M ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(5) );
  338   reg_def V17_N ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(6) );
  339   reg_def V17_O ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(7) );
  340 
  341   reg_def V18   ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()          );
  342   reg_def V18_H ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next()  );
  343   reg_def V18_J ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2) );
  344   reg_def V18_K ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3) );
  345   reg_def V18_L ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(4) );
  346   reg_def V18_M ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(5) );
  347   reg_def V18_N ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(6) );
  348   reg_def V18_O ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(7) );
  349 
  350   reg_def V19   ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()          );
  351   reg_def V19_H ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next()  );
  352   reg_def V19_J ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2) );
  353   reg_def V19_K ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3) );
  354   reg_def V19_L ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(4) );
  355   reg_def V19_M ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(5) );
  356   reg_def V19_N ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(6) );
  357   reg_def V19_O ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(7) );
  358 
  359   reg_def V20   ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()          );
  360   reg_def V20_H ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next()  );
  361   reg_def V20_J ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2) );
  362   reg_def V20_K ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3) );
  363   reg_def V20_L ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(4) );
  364   reg_def V20_M ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(5) );
  365   reg_def V20_N ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(6) );
  366   reg_def V20_O ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(7) );
  367 
  368   reg_def V21   ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()          );
  369   reg_def V21_H ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next()  );
  370   reg_def V21_J ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2) );
  371   reg_def V21_K ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3) );
  372   reg_def V21_L ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(4) );
  373   reg_def V21_M ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(5) );
  374   reg_def V21_N ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(6) );
  375   reg_def V21_O ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(7) );
  376 
  377   reg_def V22   ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()          );
  378   reg_def V22_H ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next()  );
  379   reg_def V22_J ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2) );
  380   reg_def V22_K ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3) );
  381   reg_def V22_L ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(4) );
  382   reg_def V22_M ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(5) );
  383   reg_def V22_N ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(6) );
  384   reg_def V22_O ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(7) );
  385 
  386   reg_def V23   ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()          );
  387   reg_def V23_H ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next()  );
  388   reg_def V23_J ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2) );
  389   reg_def V23_K ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3) );
  390   reg_def V23_L ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(4) );
  391   reg_def V23_M ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(5) );
  392   reg_def V23_N ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(6) );
  393   reg_def V23_O ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(7) );
  394 
  395   reg_def V24   ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()          );
  396   reg_def V24_H ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next()  );
  397   reg_def V24_J ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2) );
  398   reg_def V24_K ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3) );
  399   reg_def V24_L ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(4) );
  400   reg_def V24_M ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(5) );
  401   reg_def V24_N ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(6) );
  402   reg_def V24_O ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(7) );
  403 
  404   reg_def V25   ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()          );
  405   reg_def V25_H ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next()  );
  406   reg_def V25_J ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2) );
  407   reg_def V25_K ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3) );
  408   reg_def V25_L ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(4) );
  409   reg_def V25_M ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(5) );
  410   reg_def V25_N ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(6) );
  411   reg_def V25_O ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(7) );
  412 
  413   reg_def V26   ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()          );
  414   reg_def V26_H ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next()  );
  415   reg_def V26_J ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2) );
  416   reg_def V26_K ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3) );
  417   reg_def V26_L ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(4) );
  418   reg_def V26_M ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(5) );
  419   reg_def V26_N ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(6) );
  420   reg_def V26_O ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(7) );
  421 
  422   reg_def V27   ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()          );
  423   reg_def V27_H ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next()  );
  424   reg_def V27_J ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2) );
  425   reg_def V27_K ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3) );
  426   reg_def V27_L ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(4) );
  427   reg_def V27_M ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(5) );
  428   reg_def V27_N ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(6) );
  429   reg_def V27_O ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(7) );
  430 
  431   reg_def V28   ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()          );
  432   reg_def V28_H ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next()  );
  433   reg_def V28_J ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2) );
  434   reg_def V28_K ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3) );
  435   reg_def V28_L ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(4) );
  436   reg_def V28_M ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(5) );
  437   reg_def V28_N ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(6) );
  438   reg_def V28_O ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(7) );
  439 
  440   reg_def V29   ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()          );
  441   reg_def V29_H ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next()  );
  442   reg_def V29_J ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2) );
  443   reg_def V29_K ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3) );
  444   reg_def V29_L ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(4) );
  445   reg_def V29_M ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(5) );
  446   reg_def V29_N ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(6) );
  447   reg_def V29_O ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(7) );
  448 
  449   reg_def V30   ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()          );
  450   reg_def V30_H ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next()  );
  451   reg_def V30_J ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2) );
  452   reg_def V30_K ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3) );
  453   reg_def V30_L ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(4) );
  454   reg_def V30_M ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(5) );
  455   reg_def V30_N ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(6) );
  456   reg_def V30_O ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(7) );
  457 
  458   reg_def V31   ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()          );
  459   reg_def V31_H ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next()  );
  460   reg_def V31_J ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2) );
  461   reg_def V31_K ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3) );
  462   reg_def V31_L ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(4) );
  463   reg_def V31_M ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(5) );
  464   reg_def V31_N ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(6) );
  465   reg_def V31_O ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(7) );
  466 
  467 
  468 // ----------------------------
  469 // SVE Predicate Registers
  470 // ----------------------------
  471   reg_def P0 (SOC, SOC, Op_RegVMask, 0, p0->as_VMReg());
  472   reg_def P1 (SOC, SOC, Op_RegVMask, 1, p1->as_VMReg());
  473   reg_def P2 (SOC, SOC, Op_RegVMask, 2, p2->as_VMReg());
  474   reg_def P3 (SOC, SOC, Op_RegVMask, 3, p3->as_VMReg());
  475   reg_def P4 (SOC, SOC, Op_RegVMask, 4, p4->as_VMReg());
  476   reg_def P5 (SOC, SOC, Op_RegVMask, 5, p5->as_VMReg());
  477   reg_def P6 (SOC, SOC, Op_RegVMask, 6, p6->as_VMReg());
  478   reg_def P7 (SOC, SOC, Op_RegVMask, 7, p7->as_VMReg());
  479   reg_def P8 (SOC, SOC, Op_RegVMask, 8, p8->as_VMReg());
  480   reg_def P9 (SOC, SOC, Op_RegVMask, 9, p9->as_VMReg());
  481   reg_def P10 (SOC, SOC, Op_RegVMask, 10, p10->as_VMReg());
  482   reg_def P11 (SOC, SOC, Op_RegVMask, 11, p11->as_VMReg());
  483   reg_def P12 (SOC, SOC, Op_RegVMask, 12, p12->as_VMReg());
  484   reg_def P13 (SOC, SOC, Op_RegVMask, 13, p13->as_VMReg());
  485   reg_def P14 (SOC, SOC, Op_RegVMask, 14, p14->as_VMReg());
  486   reg_def P15 (SOC, SOC, Op_RegVMask, 15, p15->as_VMReg());
  487 
  488 // ----------------------------
  489 // Special Registers
  490 // ----------------------------
  491 
  492 // the AArch64 CPSR status flag register is not directly accessible
  493 // as an instruction operand. the FPSR status flag register is a system
  494 // register which can be written/read using MSR/MRS but again does not
  495 // appear as an operand (a code identifying the FPSR occurs as an
  496 // immediate value in the instruction).
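      //
      // for illustration, FPSR traffic goes through a general register, e.g.
      //
      //   mrs x0, fpsr      // read the FPSR into x0
      //   msr fpsr, x0      // write x0 back to the FPSR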
  497 
  498 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
  499 

  500 // Specify priority of register selection within phases of register
  501 // allocation.  Highest priority is first.  A useful heuristic is to
  502 // give registers a low priority when they are required by machine
  503 // instructions, like EAX and EDX on I486, and choose no-save registers
  504 // before save-on-call, & save-on-call before save-on-entry.  Registers
  505 // which participate in fixed calling sequences should come last.
  506 // Registers which are used as pairs must fall on an even boundary.
  507 
  508 alloc_class chunk0(
  509     // volatiles
  510     R10, R10_H,
  511     R11, R11_H,
  512     R12, R12_H,
  513     R13, R13_H,
  514     R14, R14_H,
  515     R15, R15_H,
  516     R16, R16_H,
  517     R17, R17_H,
  518     R18, R18_H,
  519 

  527     R6, R6_H,
  528     R7, R7_H,
  529 
  530     // non-volatiles
  531     R19, R19_H,
  532     R20, R20_H,
  533     R21, R21_H,
  534     R22, R22_H,
  535     R23, R23_H,
  536     R24, R24_H,
  537     R25, R25_H,
  538     R26, R26_H,
  539 
  540     // non-allocatable registers
  541 
  542     R27, R27_H, // heapbase
  543     R28, R28_H, // thread
  544     R29, R29_H, // fp
  545     R30, R30_H, // lr
  546     R31, R31_H, // sp
  547     R8, R8_H,   // rscratch1
  548     R9, R9_H,   // rscratch2
  549 );
  550 
  551 alloc_class chunk1(
  552 
  553     // no save
  554     V16, V16_H, V16_J, V16_K, V16_L, V16_M, V16_N, V16_O,
  555     V17, V17_H, V17_J, V17_K, V17_L, V17_M, V17_N, V17_O,
  556     V18, V18_H, V18_J, V18_K, V18_L, V18_M, V18_N, V18_O,
  557     V19, V19_H, V19_J, V19_K, V19_L, V19_M, V19_N, V19_O,
  558     V20, V20_H, V20_J, V20_K, V20_L, V20_M, V20_N, V20_O,
  559     V21, V21_H, V21_J, V21_K, V21_L, V21_M, V21_N, V21_O,
  560     V22, V22_H, V22_J, V22_K, V22_L, V22_M, V22_N, V22_O,
  561     V23, V23_H, V23_J, V23_K, V23_L, V23_M, V23_N, V23_O,
  562     V24, V24_H, V24_J, V24_K, V24_L, V24_M, V24_N, V24_O,
  563     V25, V25_H, V25_J, V25_K, V25_L, V25_M, V25_N, V25_O,
  564     V26, V26_H, V26_J, V26_K, V26_L, V26_M, V26_N, V26_O,
  565     V27, V27_H, V27_J, V27_K, V27_L, V27_M, V27_N, V27_O,
  566     V28, V28_H, V28_J, V28_K, V28_L, V28_M, V28_N, V28_O,
  567     V29, V29_H, V29_J, V29_K, V29_L, V29_M, V29_N, V29_O,
  568     V30, V30_H, V30_J, V30_K, V30_L, V30_M, V30_N, V30_O,
  569     V31, V31_H, V31_J, V31_K, V31_L, V31_M, V31_N, V31_O,
  570 
  571     // arg registers
  572     V0, V0_H, V0_J, V0_K, V0_L, V0_M, V0_N, V0_O,
  573     V1, V1_H, V1_J, V1_K, V1_L, V1_M, V1_N, V1_O,
  574     V2, V2_H, V2_J, V2_K, V2_L, V2_M, V2_N, V2_O,
  575     V3, V3_H, V3_J, V3_K, V3_L, V3_M, V3_N, V3_O,
  576     V4, V4_H, V4_J, V4_K, V4_L, V4_M, V4_N, V4_O,
  577     V5, V5_H, V5_J, V5_K, V5_L, V5_M, V5_N, V5_O,
  578     V6, V6_H, V6_J, V6_K, V6_L, V6_M, V6_N, V6_O,
  579     V7, V7_H, V7_J, V7_K, V7_L, V7_M, V7_N, V7_O,
  580 
  581     // non-volatiles
  582     V8, V8_H, V8_J, V8_K, V8_L, V8_M, V8_N, V8_O,
  583     V9, V9_H, V9_J, V9_K, V9_L, V9_M, V9_N, V9_O,
  584     V10, V10_H, V10_J, V10_K, V10_L, V10_M, V10_N, V10_O,
  585     V11, V11_H, V11_J, V11_K, V11_L, V11_M, V11_N, V11_O,
  586     V12, V12_H, V12_J, V12_K, V12_L, V12_M, V12_N, V12_O,
  587     V13, V13_H, V13_J, V13_K, V13_L, V13_M, V13_N, V13_O,
  588     V14, V14_H, V14_J, V14_K, V14_L, V14_M, V14_N, V14_O,
  589     V15, V15_H, V15_J, V15_K, V15_L, V15_M, V15_N, V15_O,
  590 );
  591 
  592 alloc_class chunk2 (
  593     P0,
  594     P1,
  595     P2,
  596     P3,
  597     P4,
  598     P5,
  599     P6,
  600     P7,
  601 
  602     P8,
  603     P9,
  604     P10,
  605     P11,
  606     P12,
  607     P13,
  608     P14,
  609     P15,
  610 );
  611 
  612 alloc_class chunk3(RFLAGS);
  613 
  614 //----------Architecture Description Register Classes--------------------------
  615 // Several register classes are automatically defined based upon information in
  616 // this architecture description.
  617 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
  618 // 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
  619 // 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
  620 // 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
  621 //
  622 
  623 // Class for all 32 bit general purpose registers
  624 reg_class all_reg32(
  625     R0,
  626     R1,
  627     R2,
  628     R3,
  629     R4,
  630     R5,
  631     R6,
  632     R7,

  876     V14, V14_H,
  877     V15, V15_H,
  878     V16, V16_H,
  879     V17, V17_H,
  880     V18, V18_H,
  881     V19, V19_H,
  882     V20, V20_H,
  883     V21, V21_H,
  884     V22, V22_H,
  885     V23, V23_H,
  886     V24, V24_H,
  887     V25, V25_H,
  888     V26, V26_H,
  889     V27, V27_H,
  890     V28, V28_H,
  891     V29, V29_H,
  892     V30, V30_H,
  893     V31, V31_H
  894 );
  895 
  896 // Class for all SVE vector registers.
  897 reg_class vectora_reg (
  898     V0, V0_H, V0_J, V0_K, V0_L, V0_M, V0_N, V0_O,
  899     V1, V1_H, V1_J, V1_K, V1_L, V1_M, V1_N, V1_O,
  900     V2, V2_H, V2_J, V2_K, V2_L, V2_M, V2_N, V2_O,
  901     V3, V3_H, V3_J, V3_K, V3_L, V3_M, V3_N, V3_O,
  902     V4, V4_H, V4_J, V4_K, V4_L, V4_M, V4_N, V4_O,
  903     V5, V5_H, V5_J, V5_K, V5_L, V5_M, V5_N, V5_O,
  904     V6, V6_H, V6_J, V6_K, V6_L, V6_M, V6_N, V6_O,
  905     V7, V7_H, V7_J, V7_K, V7_L, V7_M, V7_N, V7_O,
  906     V8, V8_H, V8_J, V8_K, V8_L, V8_M, V8_N, V8_O,
  907     V9, V9_H, V9_J, V9_K, V9_L, V9_M, V9_N, V9_O,
  908     V10, V10_H, V10_J, V10_K, V10_L, V10_M, V10_N, V10_O,
  909     V11, V11_H, V11_J, V11_K, V11_L, V11_M, V11_N, V11_O,
  910     V12, V12_H, V12_J, V12_K, V12_L, V12_M, V12_N, V12_O,
  911     V13, V13_H, V13_J, V13_K, V13_L, V13_M, V13_N, V13_O,
  912     V14, V14_H, V14_J, V14_K, V14_L, V14_M, V14_N, V14_O,
  913     V15, V15_H, V15_J, V15_K, V15_L, V15_M, V15_N, V15_O,
  914     V16, V16_H, V16_J, V16_K, V16_L, V16_M, V16_N, V16_O,
  915     V17, V17_H, V17_J, V17_K, V17_L, V17_M, V17_N, V17_O,
  916     V18, V18_H, V18_J, V18_K, V18_L, V18_M, V18_N, V18_O,
  917     V19, V19_H, V19_J, V19_K, V19_L, V19_M, V19_N, V19_O,
  918     V20, V20_H, V20_J, V20_K, V20_L, V20_M, V20_N, V20_O,
  919     V21, V21_H, V21_J, V21_K, V21_L, V21_M, V21_N, V21_O,
  920     V22, V22_H, V22_J, V22_K, V22_L, V22_M, V22_N, V22_O,
  921     V23, V23_H, V23_J, V23_K, V23_L, V23_M, V23_N, V23_O,
  922     V24, V24_H, V24_J, V24_K, V24_L, V24_M, V24_N, V24_O,
  923     V25, V25_H, V25_J, V25_K, V25_L, V25_M, V25_N, V25_O,
  924     V26, V26_H, V26_J, V26_K, V26_L, V26_M, V26_N, V26_O,
  925     V27, V27_H, V27_J, V27_K, V27_L, V27_M, V27_N, V27_O,
  926     V28, V28_H, V28_J, V28_K, V28_L, V28_M, V28_N, V28_O,
  927     V29, V29_H, V29_J, V29_K, V29_L, V29_M, V29_N, V29_O,
  928     V30, V30_H, V30_J, V30_K, V30_L, V30_M, V30_N, V30_O,
  929     V31, V31_H, V31_J, V31_K, V31_L, V31_M, V31_N, V31_O,
  930 );
  931 
  932 // Class for all 64 bit vector registers
  933 reg_class vectord_reg(
  934     V0, V0_H,
  935     V1, V1_H,
  936     V2, V2_H,
  937     V3, V3_H,
  938     V4, V4_H,
  939     V5, V5_H,
  940     V6, V6_H,
  941     V7, V7_H,
  942     V8, V8_H,
  943     V9, V9_H,
  944     V10, V10_H,
  945     V11, V11_H,
  946     V12, V12_H,
  947     V13, V13_H,
  948     V14, V14_H,
  949     V15, V15_H,
  950     V16, V16_H,
  951     V17, V17_H,

 1144 // Class for 128 bit register v28
 1145 reg_class v28_reg(
 1146     V28, V28_H
 1147 );
 1148 
 1149 // Class for 128 bit register v29
 1150 reg_class v29_reg(
 1151     V29, V29_H
 1152 );
 1153 
 1154 // Class for 128 bit register v30
 1155 reg_class v30_reg(
 1156     V30, V30_H
 1157 );
 1158 
 1159 // Class for 128 bit register v31
 1160 reg_class v31_reg(
 1161     V31, V31_H
 1162 );
 1163 
 1164 // Class for all SVE predicate registers.
 1165 reg_class pr_reg (
 1166     P0,
 1167     P1,
 1168     P2,
 1169     P3,
 1170     P4,
 1171     P5,
 1172     P6,
 1173     P7,
 1174     P8,
 1175     P9,
 1176     P10,
 1177     P11,
 1178     P12,
 1179     P13,
 1180     P14,
 1181     P15
 1182 );
 1183 
 1184 // Class for SVE governing predicate registers, which are used
 1185 // to determine the active elements of a predicated instruction.
 1186 reg_class gov_pr (
 1187     P0,
 1188     P1,
 1189     P2,
 1190     P3,
 1191     P4,
 1192     P5,
 1193     P6,
 1194     P7
 1195 );
 1196 
 1197 // Singleton class for condition codes
 1198 reg_class int_flags(RFLAGS);
 1199 
 1200 %}
 1201 
 1202 //----------DEFINITION BLOCK---------------------------------------------------
 1203 // Define name --> value mappings to inform the ADLC of an integer valued name
 1204 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 1205 // Format:
 1206 //        int_def  <name>         ( <int_value>, <expression>);
 1207 // Generated Code in ad_<arch>.hpp
 1208 //        #define  <name>   (<expression>)
 1209 //        // value == <int_value>
 1210 // Generated code in ad_<arch>.cpp adlc_verification()
 1211 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 1212 //
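      //
      // For example, an (illustrative) entry
      //        int_def  DEFAULT_COST  ( 100, 100);
      // would generate "#define DEFAULT_COST  (100)" in the header and an
      // assert that DEFAULT_COST == 100 in adlc_verification().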
 1213 
 1214 // we follow the ppc-aix port in using a simple cost model which ranks
 1215 // register operations as cheap, memory ops as more expensive and
 1216 // branches as most expensive. the first two have a low as well as a

 1979 }
 1980 
 1981 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
 1982   // Variable size. Determine dynamically.
 1983   return MachNode::size(ra_);
 1984 }
 1985 
 1986 int MachEpilogNode::reloc() const {
 1987   // Return number of relocatable values contained in this instruction.
 1988   return 1; // 1 for polling page.
 1989 }
 1990 
 1991 const Pipeline * MachEpilogNode::pipeline() const {
 1992   return MachNode::pipeline_class();
 1993 }
 1994 
 1995 //=============================================================================
 1996 
 1997 // Figure out which register class each belongs in: rc_int, rc_float,
 1998 // rc_predicate or rc_stack.
 1999 enum RC { rc_bad, rc_int, rc_float, rc_predicate, rc_stack };
 2000 
 2001 static enum RC rc_class(OptoReg::Name reg) {
 2002 
 2003   if (reg == OptoReg::Bad) {
 2004     return rc_bad;
 2005   }
 2006 
 2007   // we have 32 int registers * 2 halves
 2008   int slots_of_int_registers = RegisterImpl::max_slots_per_register * RegisterImpl::number_of_registers;
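        // (2 slots per register * 32 registers = 64 slots; OptoReg names
        // below this bound all denote general purpose register halves)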

 2009 
 2010   if (reg < slots_of_int_registers) {
 2011     return rc_int;
 2012   }
 2013 
 2014   // we have 32 float registers * 8 halves
 2015   int slots_of_float_registers = FloatRegisterImpl::max_slots_per_register * FloatRegisterImpl::number_of_registers;
 2016   if (reg < slots_of_int_registers + slots_of_float_registers) {
 2017     return rc_float;
 2018   }
 2019 
 2020   int slots_of_predicate_registers = PRegisterImpl::max_slots_per_register * PRegisterImpl::number_of_registers;
 2021   if (reg < slots_of_int_registers + slots_of_float_registers + slots_of_predicate_registers) {
 2022     return rc_predicate;
 2023   }
 2024 
 2025   // Between predicate regs & stack are the flags.
 2026   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
 2027 
 2028   return rc_stack;
 2029 }
 2030 
 2031 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
 2032   Compile* C = ra_->C;
 2033 
 2034   // Get registers to move.
 2035   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
 2036   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
 2037   OptoReg::Name dst_hi = ra_->get_reg_second(this);
 2038   OptoReg::Name dst_lo = ra_->get_reg_first(this);
 2039 
 2040   enum RC src_hi_rc = rc_class(src_hi);
 2041   enum RC src_lo_rc = rc_class(src_lo);
 2042   enum RC dst_hi_rc = rc_class(dst_hi);
 2043   enum RC dst_lo_rc = rc_class(dst_lo);
 2044 
 2045   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
 2046 
 2047   if (src_hi != OptoReg::Bad) {
 2048     assert((src_lo&1)==0 && src_lo+1==src_hi &&
 2049            (dst_lo&1)==0 && dst_lo+1==dst_hi,
 2050            "expected aligned-adjacent pairs");
 2051   }
 2052 
 2053   if (src_lo == dst_lo && src_hi == dst_hi) {
 2054     return 0;            // Self copy, no move.
 2055   }
 2056 
 2057   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
 2058               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
 2059   int src_offset = ra_->reg2offset(src_lo);
 2060   int dst_offset = ra_->reg2offset(dst_lo);
 2061 
 2062   if (bottom_type()->isa_vect() != NULL) {
 2063     uint ireg = ideal_reg();
 2064     if (ireg == Op_VecA && cbuf) {
 2065       Unimplemented();
 2066     } else if (cbuf) {
 2067       assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
 2068       C2_MacroAssembler _masm(cbuf);
 2069       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
 2070       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
 2071         // stack->stack
 2072         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
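              // A 64-bit vector can round-trip through rscratch1; a 128-bit
              // vector goes through spill_copy128, which (in this port) moves
              // the payload via the two scratch registers.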
 2073         if (ireg == Op_VecD) {
 2074           __ unspill(rscratch1, true, src_offset);
 2075           __ spill(rscratch1, true, dst_offset);
 2076         } else {
 2077           __ spill_copy128(src_offset, dst_offset);
 2078         }
 2079       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
 2080         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2081                ireg == Op_VecD ? __ T8B : __ T16B,
 2082                as_FloatRegister(Matcher::_regEncode[src_lo]));
 2083       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
 2084         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
 2085                  ireg == Op_VecD ? __ D : __ Q,
 2086                  ra_->reg2offset(dst_lo));
 2087       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
 2088         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 2089                    ireg == Op_VecD ? __ D : __ Q,
 2090                    ra_->reg2offset(src_lo));
 2091       } else {
 2092         ShouldNotReachHere();
 2093       }
 2094     }
 2095   } else if (cbuf) {
 2096     C2_MacroAssembler _masm(cbuf);
 2097     switch (src_lo_rc) {
 2098     case rc_int:
 2099       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
 2100         if (is64) {
 2101             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
 2102                    as_Register(Matcher::_regEncode[src_lo]));
 2103         } else {
 2105             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
 2106                     as_Register(Matcher::_regEncode[src_lo]));
 2107         }
 2108       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
 2109         if (is64) {
 2110             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),

 2155       break;
 2156     default:
 2157       assert(false, "bad rc_class for spill");
 2158       ShouldNotReachHere();
 2159     }
 2160   }
 2161 
 2162   if (st) {
 2163     st->print("spill ");
 2164     if (src_lo_rc == rc_stack) {
 2165       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
 2166     } else {
 2167       st->print("%s -> ", Matcher::regName[src_lo]);
 2168     }
 2169     if (dst_lo_rc == rc_stack) {
 2170       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
 2171     } else {
 2172       st->print("%s", Matcher::regName[dst_lo]);
 2173     }
 2174     if (bottom_type()->isa_vect() != NULL) {
 2175       int vsize = 0;
 2176       switch (ideal_reg()) {
 2177       case Op_VecD:
 2178         vsize = 64;
 2179         break;
 2180       case Op_VecX:
 2181         vsize = 128;
 2182         break;
 2183       case Op_VecA:
 2184         vsize = Matcher::scalable_vector_reg_size(T_BYTE) * 8;
 2185         break;
 2186       default:
 2187         assert(false, "bad register type for spill");
 2188         ShouldNotReachHere();
 2189       }
 2190       st->print("\t# vector spill size = %d", vsize);
 2191     } else {
 2192       st->print("\t# spill size = %d", is64 ? 64 : 32);
 2193     }
 2194   }
 2195 
 2196   return 0;
 2197 
 2198 }
 2199 
 2200 #ifndef PRODUCT
 2201 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 2202   if (!ra_)
 2203     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
 2204   else
 2205     implementation(NULL, ra_, false, st);
 2206 }
 2207 #endif
 2208 
 2209 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
 2210   implementation(&cbuf, ra_, false, NULL);
 2211 }
 2212 

 2356 // other intrinsics guarded with vector length (vlen) and element type (bt).
 2357 const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
 2358   if (!match_rule_supported(opcode)) {
 2359     return false;
 2360   }
 2361 
 2362   // Special cases which require vector length
 2363   switch (opcode) {
 2364     case Op_MulAddVS2VI: {
 2365       if (vlen != 4) {
 2366         return false;
 2367       }
 2368       break;
 2369     }
 2370   }
 2371 
 2372   return true; // By default, match rules are supported.
 2373 }
 2374 
 2375 const bool Matcher::has_predicated_vectors(void) {
 2376   return UseSVE > 0;
 2377 }
 2378 
 2379 const int Matcher::float_pressure(int default_pressure_threshold) {
 2380   return default_pressure_threshold;
 2381 }
 2382 
 2383 int Matcher::regnum_to_fpu_offset(int regnum)
 2384 {
 2385   Unimplemented();
 2386   return 0;
 2387 }
 2388 
 2389 // Is this branch offset short enough that a short branch can be used?
 2390 //
 2391 // NOTE: If the platform does not provide any short branch variants, then
 2392 //       this method should return false for offset 0.
 2393 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
 2394   // The passed offset is relative to address of the branch.
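        // A +/-32KB window is the reach of the shortest conditional branch
        // forms: tbz/tbnz encode a signed 14-bit word offset (+/-32KB),
        // while cbz/cbnz and b.cond reach +/-1MB.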
 2395 
 2396   return (-32768 <= offset && offset < 32768);
 2397 }
 2398 
 2399 const bool Matcher::isSimpleConstant64(jlong value) {
 2400   // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?
 2401   // Probably always true, even if a temp register is required.
 2402   return true;
 2403 }
 2404 
 2405 // true just means we have fast l2f conversion
 2406 const bool Matcher::convL2FSupported(void) {
 2407   return true;
 2408 }
 2409 
 2410 // Vector width in bytes.
 2411 const int Matcher::vector_width_in_bytes(BasicType bt) {
 2412   int size = MIN2(16, (int)MaxVectorSize);
 2413   // Minimum 2 values in vector
 2414   if (size < 2*type2aelembytes(bt)) size = 0;
 2415   // But never < 4
 2416   if (size < 4) size = 0;
 2417   return size;
 2418 }
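      // e.g. with MaxVectorSize >= 16 this returns 16 for every element type;
      // with MaxVectorSize == 8 it returns 8 for T_INT but 0 for T_LONG,
      // since a vector must hold at least two elements.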
 2419 
 2420 // Limits on vector size (number of elements) loaded into vector.
 2421 const int Matcher::max_vector_size(const BasicType bt) {
 2422   return vector_width_in_bytes(bt)/type2aelembytes(bt);
 2423 }
 2424 const int Matcher::min_vector_size(const BasicType bt) {
 2425   int max_size = max_vector_size(bt);
 2426   if ((UseSVE > 0) && (MaxVectorSize >= 16)) {
 2427     // Currently, vector lengths smaller than the SVE vector register size are not supported.
 2428     return max_size;
 2429   } else {
 2430     // For the moment limit the vector size to 8 bytes with NEON.
 2431     int size = 8 / type2aelembytes(bt);
 2432     if (size < 2) size = 2;
 2433     return size;
 2434   }
 2435 }
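      // e.g. on NEON min_vector_size(T_BYTE) == 8, while min_vector_size(T_LONG)
      // is raised to the lower bound of 2.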
 2436 
 2437 const bool Matcher::supports_scalable_vector() {
 2438   return UseSVE > 0;
 2439 }
 2440 
 2441 // Actual max scalable vector register length, in number of elements.
 2442 const int Matcher::scalable_vector_reg_size(const BasicType bt) {
 2443   return Matcher::max_vector_size(bt);
 2444 }
 2445 
 2446 // Vector ideal reg.
 2447 const uint Matcher::vector_ideal_reg(int len) {
 2448   if (UseSVE > 0 && 16 <= len && len <= 256) {
 2449     return Op_VecA;
 2450   }
 2451   switch(len) {
 2452     case  8: return Op_VecD;
 2453     case 16: return Op_VecX;
 2454   }
 2455   ShouldNotReachHere();
 2456   return 0;
 2457 }
 2458 
 2459 // AES support not yet implemented
 2460 const bool Matcher::pass_original_key_for_aes() {
 2461   return false;
 2462 }
 2463 
 2464 // aarch64 supports misaligned vector store/load.
 2465 const bool Matcher::misaligned_vectors_ok() {
 2466   return true;
 2467 }
 2468 
 2469 // false => size gets scaled to BytesPerLong, ok.
 2470 const bool Matcher::init_array_count_is_in_bytes = false;

 5135   constraint(ALLOC_IN_RC(float_reg));
 5136   match(RegF);
 5137 
 5138   op_cost(0);
 5139   format %{ %}
 5140   interface(REG_INTER);
 5141 %}
 5142 
 5143 // Double Register
 5144 // Double register operands
 5145 operand vRegD()
 5146 %{
 5147   constraint(ALLOC_IN_RC(double_reg));
 5148   match(RegD);
 5149 
 5150   op_cost(0);
 5151   format %{ %}
 5152   interface(REG_INTER);
 5153 %}
 5154 
 5155 operand vecA()
 5156 %{
 5157   constraint(ALLOC_IN_RC(vectora_reg));
 5158   match(VecA);
 5159   op_cost(0);
 5160   format %{ %}
 5161   interface(REG_INTER);
 5162 %}
 5163 
 5164 operand vecD()
 5165 %{
 5166   constraint(ALLOC_IN_RC(vectord_reg));
 5167   match(VecD);
 5168 
 5169   op_cost(0);
 5170   format %{ %}
 5171   interface(REG_INTER);
 5172 %}
 5173 
 5174 operand vecX()
 5175 %{
 5176   constraint(ALLOC_IN_RC(vectorx_reg));
 5177   match(VecX);
 5178 
 5179   op_cost(0);
 5180   format %{ %}
 5181   interface(REG_INTER);
 5182 %}
 5183 

 5452 %}
 5453 
 5454 operand vRegD_V30()
 5455 %{
 5456   constraint(ALLOC_IN_RC(v30_reg));
 5457   match(RegD);
 5458   op_cost(0);
 5459   format %{ %}
 5460   interface(REG_INTER);
 5461 %}
 5462 
 5463 operand vRegD_V31()
 5464 %{
 5465   constraint(ALLOC_IN_RC(v31_reg));
 5466   match(RegD);
 5467   op_cost(0);
 5468   format %{ %}
 5469   interface(REG_INTER);
 5470 %}
 5471 
 5472 operand pRegGov()
 5473 %{
 5474   constraint(ALLOC_IN_RC(gov_pr));
 5475   match(RegVMask);
 5476   op_cost(0);
 5477   format %{ %}
 5478   interface(REG_INTER);
 5479 %}
 5480 
 5481 // Flags register, used as output of signed compare instructions
 5482 
 5483 // note that on AArch64 we also use this register as the output
 5484 // for floating point compare instructions (CmpF CmpD). this ensures
 5485 // that ordered inequality tests use GT, GE, LT or LE none of which
 5486 // pass through cases where the result is unordered i.e. one or both
 5487 // inputs to the compare is a NaN. this means that the ideal code can
 5488 // replace e.g. a GT with an LE and not end up capturing the NaN case
 5489 // (where the comparison should always fail). EQ and NE tests are
 5490 // always generated in ideal code so that unordered folds into the NE
 5491 // case, matching the behaviour of AArch64 NE.
 5492 //
 5493 // This differs from x86 where the outputs of FP compares use a
 5494 // special FP flags registers and where compares based on this
 5495 // register are distinguished into ordered inequalities (cmpOpUCF) and
 5496 // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
 5497 // to explicitly handle the unordered case in branches. x86 also has
 5498 // to include extra CMoveX rules to accept a cmpOpUCF input.
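      //
      // For example, a Java test "x > y" on doubles becomes an fcmp followed
      // by a GT-conditioned branch; if either input is NaN the compare sets
      // the unordered flag combination, GT evaluates false, and the branch
      // falls through, which is exactly what Java semantics require.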
 5499 
 5500 operand rFlagsReg()