1 //
   2 // Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
   3 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4 //
   5 // This code is free software; you can redistribute it and/or modify it
   6 // under the terms of the GNU General Public License version 2 only, as
   7 // published by the Free Software Foundation.
   8 //
   9 // This code is distributed in the hope that it will be useful, but WITHOUT
  10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12 // version 2 for more details (a copy is included in the LICENSE file that
  13 // accompanied this code).
  14 //
  15 // You should have received a copy of the GNU General Public License version
  16 // 2 along with this work; if not, write to the Free Software Foundation,
  17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18 //
  19 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20 // or visit www.oracle.com if you need additional information or have any
  21 // questions.
  22 //
  23 
  24 // ARM Architecture Description File
  25 
  26 //----------DEFINITION BLOCK---------------------------------------------------
  27 // Define name --> value mappings to inform the ADLC of an integer valued name
  28 // Current support includes integer values in the range [0, 0x7FFFFFFF]
  29 // Format:
  30 //        int_def  <name>         ( <int_value>, <expression>);
  31 // Generated Code in ad_<arch>.hpp
  32 //        #define  <name>   (<expression>)
  33 //        // value == <int_value>
  34 // Generated code in ad_<arch>.cpp adlc_verification()
  35 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
  36 //
definitions %{
// The default cost (of an ALU instruction).
  int_def DEFAULT_COST      (    100,     100);
// A very large cost, used to strongly discourage selection of an operation.
  int_def HUGE_COST         (1000000, 1000000);

// Memory refs are twice as expensive as run-of-the-mill.
  int_def MEMORY_REF_COST   (    200, DEFAULT_COST * 2);

// Branches are even more expensive.
  int_def BRANCH_COST       (    300, DEFAULT_COST * 3);
  int_def CALL_COST         (    300, DEFAULT_COST * 3);
%}
  49 
  50 
  51 //----------SOURCE BLOCK-------------------------------------------------------
  52 // This is a block of C++ code which provides values, functions, and
  53 // definitions necessary in the rest of the architecture description
  54 source_hpp %{
  55 // Header information of the source block.
  56 // Method declarations/definitions which are used outside
  57 // the ad-scope can conveniently be defined here.
  58 //
  59 // To keep related declarations/definitions/uses close together,
  60 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
  61 
  62 // Does destination need to be loaded in a register then passed to a
  63 // branch instruction?
  64 extern bool maybe_far_call(const CallNode *n);
  65 extern bool maybe_far_call(const MachCallNode *n);
// Returns whether the whole code cache is reachable with PC-relative
// call/branch instructions; simply delegates to the MacroAssembler.
static inline bool cache_reachable() {
  return MacroAssembler::_cache_fully_reachable();
}
  69 
  70 #ifdef AARCH64
  71 #define ldr_32 ldr_w
  72 #define str_32 str_w
  73 #else
  74 #define ldr_32 ldr
  75 #define str_32 str
  76 #define tst_32 tst
  77 #define teq_32 teq
  78 #endif
  79 #if 1
  80 extern bool PrintOptoAssembly;
  81 #endif
  82 
// Platform helper describing C2 calling-convention details.
class c2 {
public:
  // Returns the register (pair) used to return a value of the given
  // ideal register type.
  static OptoRegPair return_value(int ideal_reg);
};
  87 
// Platform hooks describing call trampoline stubs (not used on ARM).
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::Shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
 105 
// Platform hooks for emitting and sizing exception/deopt handler stubs.
class HandlerImpl {

 public:

  // Emit the actual handler code; return the offset of the handler in cbuf.
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // Worst-case size in bytes reserved for the exception handler stub.
  static uint size_exception_handler() {
#ifdef AARCH64
    // ldr_literal; br; (pad); <literal>
    return 3 * Assembler::InstructionSize + wordSize;
#else
    // 3 ARM instructions of 4 bytes each
    return ( 3 * 4 );
#endif
  }


  // Worst-case size in bytes reserved for the deopt handler stub.
  static uint size_deopt_handler() {
    // 9 instruction words
    return ( 9 * 4 );
  }

};
 128 
 129 %}
 130 
 131 source %{
 132 #define __ _masm.
 133 
 134 static FloatRegister reg_to_FloatRegister_object(int register_encoding);
 135 static Register reg_to_register_object(int register_encoding);
 136 
 137 
 138 // ****************************************************************************
 139 
 140 // REQUIRED FUNCTIONALITY
 141 
// Indicate if the safepoint node needs the polling page as an input.
// Since ARM does not have absolute addressing, it does: the poll address
// has to be supplied in a register.
bool SafePointNode::needs_polling_address_input() {
  return true;
}
 147 
// Emit an interrupt that is caught by the debugger (for debugging the compiler).
void emit_break(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ breakpoint();
}
 153 
#ifndef PRODUCT
// Debug listing for a breakpoint node.
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream *st ) const {
  st->print("TA");
}
#endif
 159 
// Emit a breakpoint instruction for this node.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  emit_break(cbuf);
}
 163 
// Let shared code compute the node size from a scratch emission.
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 167 
 168 
// Emit a single nop instruction into the code buffer.
void emit_nop(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ nop();
}
 173 
 174 
// Emit a (possibly patchable) call with the given relocation and verify
// that the emitted code agrees with the node's precomputed metadata.
void emit_call_reloc(CodeBuffer &cbuf, const MachCallNode *n, MachOper *m, RelocationHolder const& rspec) {
  // Return-address offset as precomputed by the node; cross-checked at the end.
  int ret_addr_offset0 = n->as_MachCall()->ret_addr_offset();
  int call_site_offset = cbuf.insts()->mark_off();
  MacroAssembler _masm(&cbuf);
  __ set_inst_mark(); // needed in emit_to_interp_stub() to locate the call
  address target = (address)m->method();
  assert(n->as_MachCall()->entry_point() == target, "sanity");
  assert(maybe_far_call(n) == !__ reachable_from_cache(target), "sanity");
  assert(cache_reachable() == __ cache_fully_reachable(), "sanity");

  assert(target != NULL, "need real address");

  int ret_addr_offset = -1;
  if (rspec.type() == relocInfo::runtime_call_type) {
    // Plain call for runtime-call relocations.
    __ call(target, rspec);
    ret_addr_offset = __ offset();
  } else {
    // scratches Rtemp
    ret_addr_offset = __ patchable_call(target, rspec, true);
  }
  // The actually emitted return-address offset must match ret_addr_offset().
  assert(ret_addr_offset - call_site_offset == ret_addr_offset0, "fix ret_addr_offset()");
}
 197 
 198 //=============================================================================
// REQUIRED FUNCTIONALITY for encoding
// No-op encoding helpers required by the ADLC interface; unused on ARM.
void emit_lo(CodeBuffer &cbuf, int val) {  }
void emit_hi(CodeBuffer &cbuf, int val) {  }
 202 
 203 
 204 //=============================================================================
 205 const RegMask& MachConstantBaseNode::_out_RegMask = PTR_REG_mask();
 206 
// Compute the offset of the constant-table base pointer from the start of
// the table.  Placing the base near the middle of the table (-size/2) lets
// the limited load offsets reach entries in both directions.
int Compile::ConstantTable::calculate_table_base_offset() const {
#ifdef AARCH64
  return 0;
#else
  int offset = -(size() / 2);
  // flds, fldd: 8-bit  offset multiplied by 4: +/- 1024
  // ldr, ldrb : 12-bit offset:                 +/- 4096
  if (!Assembler::is_simm10(offset)) {
    // Clamp to the most restrictive (FP load) reach.
    offset = Assembler::min_simm10();
  }
  return offset;
#endif
}
 220 
// This platform does not use post-register-allocation expansion of the
// constant table base node, so postalloc_expand() must never be called.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
 225 
// Materialize the constant-table base address into this node's register.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register r = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, "must be: %d == %d", constant_table.size(), consts_size);

  // Materialize the constant table base.
  // table_base_offset() is <= 0 (see calculate_table_base_offset()), so
  // negating it gives a non-negative displacement into the constants section.
  address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
  RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
  __ mov_address(r, baseaddr, rspec);
}
 241 
// Worst-case size of the mov_address emitted above.
uint MachConstantBaseNode::size(PhaseRegAlloc*) const {
#ifdef AARCH64
  return 5 * Assembler::InstructionSize;
#else
  // two 4-byte instructions — TODO confirm against mov_address expansion
  return 8;
#endif
}
 249 
#ifndef PRODUCT
// Debug listing of the constant-table base materialization.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  char reg[128];
  ra_->dump_register(this, reg);
  st->print("MOV_SLOW    &constanttable,%s\t! constant table base", reg);
}
#endif
 257 
#ifndef PRODUCT
// Debug listing of the method prologue; mirrors MachPrologNode::emit() below.
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  Compile* C = ra_->C;

  for (int i = 0; i < OptoPrologueNops; i++) {
    st->print_cr("NOP"); st->print("\t");
  }
#ifdef AARCH64
  if (OptoPrologueNops <= 0) {
    st->print_cr("NOP\t! required for safe patching");
    st->print("\t");
  }
#endif

  size_t framesize = C->frame_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
  int bangsize = C->bang_size_in_bytes();
  // Remove two words for return addr and fp,
  framesize -= 2*wordSize;
  bangsize -= 2*wordSize;

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them.  But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack.  But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("! stack bang (%d bytes)", bangsize); st->print("\t");
  }
  st->print_cr("PUSH   R_FP|R_LR_LR"); st->print("\t");
  if (framesize != 0) {
    st->print   ("SUB    R_SP, R_SP, " SIZE_FORMAT,framesize);
  }
}
#endif
 293 
// Emit the method prologue: optional nops, stack-overflow bang, push of
// FP/LR, frame allocation, and constant-table base-offset setup.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  for (int i = 0; i < OptoPrologueNops; i++) {
    __ nop();
  }
#ifdef AARCH64
  if (OptoPrologueNops <= 0) {
    __ nop(); // required for safe patching by patch_verified_entry()
  }
#endif

  size_t framesize = C->frame_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
  int bangsize = C->bang_size_in_bytes();
  // Remove two words for return addr and fp,
  framesize -= 2*wordSize;
  bangsize -= 2*wordSize;

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them.  But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack.  But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    __ arm_stack_overflow_check(bangsize, Rtemp);
  }

  // Save caller frame pointer and return address, then allocate the frame.
  __ raw_push(FP, LR);
  if (framesize != 0) {
    __ sub_slow(SP, SP, framesize);
  }

  // offset from scratch buffer is not valid
  if (strcmp(cbuf.name(), "Compile::Fill_buffer") == 0) {
    C->set_frame_complete( __ offset() );
  }

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
 340 
// Let shared code compute the prologue size from a scratch emission.
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 344 
// Upper bound on the number of relocations the prologue may need.
int MachPrologNode::reloc() const {
  return 10; // a large enough number
}
 348 
 349 //=============================================================================
#ifndef PRODUCT
// Debug listing of the method epilogue; mirrors MachEpilogNode::emit() below.
void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  Compile* C = ra_->C;

  size_t framesize = C->frame_size_in_bytes();
  // FP and LR were pushed separately; exclude them from the frame to free.
  framesize -= 2*wordSize;

  if (framesize != 0) {
    st->print("ADD    R_SP, R_SP, " SIZE_FORMAT "\n\t",framesize);
  }
  st->print("POP    R_FP|R_LR_LR");

  if (do_polling() && ra_->C->is_method_compilation()) {
    st->print("\n\t");
#ifdef AARCH64
    if (MacroAssembler::page_reachable_from_cache(os::get_polling_page())) {
      st->print("ADRP     Rtemp, #PollAddr\t! Load Polling address\n\t");
      st->print("LDR      ZR,[Rtemp + #PollAddr & 0xfff]\t!Poll for Safepointing");
    } else {
      st->print("mov_slow Rtemp, #PollAddr\t! Load Polling address\n\t");
      st->print("LDR      ZR,[Rtemp]\t!Poll for Safepointing");
    }
#else
    st->print("MOV    Rtemp, #PollAddr\t! Load Polling address\n\t");
    st->print("LDR    Rtemp,[Rtemp]\t!Poll for Safepointing");
#endif
  }
}
#endif
 379 
// Emit the method epilogue: free the frame, pop FP/LR, and (for method
// compilations with polling) emit the return safepoint poll.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  Compile* C = ra_->C;

  size_t framesize = C->frame_size_in_bytes();
  // FP and LR were pushed separately; exclude them from the frame to free.
  framesize -= 2*wordSize;
  if (framesize != 0) {
    __ add_slow(SP, SP, framesize);
  }
  __ raw_pop(FP, LR);

  // If this does safepoint polling, then do it here
  if (do_polling() && ra_->C->is_method_compilation()) {
#ifdef AARCH64
    // NOTE(review): the ADRP fast path is disabled ("false &&") pending the
    // FIXME below; the mov_address fallback is always taken.
    if (false && MacroAssembler::page_reachable_from_cache(os::get_polling_page())) {
/* FIXME: TODO
      __ relocate(relocInfo::xxx);
      __ adrp(Rtemp, (intptr_t)os::get_polling_page());
      __ relocate(relocInfo::poll_return_type);
      int offset = os::get_polling_page() & 0xfff;
      __ ldr(ZR, Address(Rtemp + offset));
*/
    } else {
      __ mov_address(Rtemp, (address)os::get_polling_page(), symbolic_Relocation::polling_page_reference);
      __ relocate(relocInfo::poll_return_type);
      __ ldr(ZR, Address(Rtemp));
    }
#else
    // mov_slow here is usually one or two instruction
    __ mov_address(Rtemp, (address)os::get_polling_page(), symbolic_Relocation::polling_page_reference);
    __ relocate(relocInfo::poll_return_type);
    __ ldr(Rtemp, Address(Rtemp));
#endif
  }
}
 415 
// Worst-case epilogue size, based on a scratch emission.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
#ifdef AARCH64
  // allow for added alignment nop from mov_address bind_literal
  return MachNode::size(ra_) + 1 * Assembler::InstructionSize;
#else
  return MachNode::size(ra_);
#endif
}
 424 
// Upper bound on the number of relocations the epilogue may need.
int MachEpilogNode::reloc() const {
  return 16; // a large enough number
}
 428 
// Use the default pipeline description for the epilogue.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
 432 
// Offset of the safepoint poll within the epilogue; not implemented on
// this platform (guarded by Unimplemented()).
int MachEpilogNode::safepoint_offset() const {
  assert( do_polling(), "no return for this epilog node");
  //  return MacroAssembler::size_of_sethi(os::get_polling_page());
  Unimplemented();
  return 0;
}
 439 
 440 //=============================================================================
 441 
 442 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
 443 enum RC { rc_bad, rc_int, rc_float, rc_stack };
 444 static enum RC rc_class( OptoReg::Name reg ) {
 445   if (!OptoReg::is_valid(reg)) return rc_bad;
 446   if (OptoReg::is_stack(reg)) return rc_stack;
 447   VMReg r = OptoReg::as_VMReg(reg);
 448   if (r->is_Register()) return rc_int;
 449   assert(r->is_FloatRegister(), "must be");
 450   return rc_float;
 451 }
 452 
// Returns true if (src_first, src_second) encode an even/odd adjacent
// register pair and the offset fits the LDRD/STRD addressing range
// (on AArch64 only the offset check applies).
static inline bool is_iRegLd_memhd(OptoReg::Name src_first, OptoReg::Name src_second, int offset) {
#ifdef AARCH64
  return is_memoryHD(offset);
#else
  int rlo = Matcher::_regEncode[src_first];
  int rhi = Matcher::_regEncode[src_second];
  if (!((rlo&1)==0 && (rlo+1 == rhi))) {
    // NOTE(review): this diagnostic fires unconditionally, including in
    // product builds — looks like leftover debugging; consider removing
    // or guarding with ASSERT.
    tty->print_cr("CAUGHT BAD LDRD/STRD");
  }
  return (rlo&1)==0 && (rlo+1 == rhi) && is_memoryHD(offset);
#endif
}
 465 
 466 uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
 467                                         PhaseRegAlloc *ra_,
 468                                         bool do_size,
 469                                         outputStream* st ) const {
 470   // Get registers to move
 471   OptoReg::Name src_second = ra_->get_reg_second(in(1));
 472   OptoReg::Name src_first = ra_->get_reg_first(in(1));
 473   OptoReg::Name dst_second = ra_->get_reg_second(this );
 474   OptoReg::Name dst_first = ra_->get_reg_first(this );
 475 
 476   enum RC src_second_rc = rc_class(src_second);
 477   enum RC src_first_rc = rc_class(src_first);
 478   enum RC dst_second_rc = rc_class(dst_second);
 479   enum RC dst_first_rc = rc_class(dst_first);
 480 
 481   assert( OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );
 482 
 483   // Generate spill code!
 484   int size = 0;
 485 
 486   if (src_first == dst_first && src_second == dst_second)
 487     return size;            // Self copy, no move
 488 
 489 #ifdef TODO
 490   if (bottom_type()->isa_vect() != NULL) {
 491   }
 492 #endif
 493 
 494   // Shared code does not expect instruction set capability based bailouts here.
 495   // Handle offset unreachable bailout with minimal change in shared code.
 496   // Bailout only for real instruction emit.
 497   // This requires a single comment change in shared code. ( see output.cpp "Normal" instruction case )
 498 
 499   MacroAssembler _masm(cbuf);
 500 
 501   // --------------------------------------
 502   // Check for mem-mem move.  Load into unused float registers and fall into
 503   // the float-store case.
 504   if (src_first_rc == rc_stack && dst_first_rc == rc_stack) {
 505     int offset = ra_->reg2offset(src_first);
 506     if (cbuf && !is_memoryfp(offset)) {
 507       ra_->C->record_method_not_compilable("unable to handle large constant offsets");
 508       return 0;
 509     } else {
 510       if (src_second_rc != rc_bad) {
 511         assert((src_first&1)==0 && src_first+1 == src_second, "pair of registers must be aligned/contiguous");
 512         src_first     = OptoReg::Name(R_mem_copy_lo_num);
 513         src_second    = OptoReg::Name(R_mem_copy_hi_num);
 514         src_first_rc  = rc_float;
 515         src_second_rc = rc_float;
 516         if (cbuf) {
 517           __ ldr_double(Rmemcopy, Address(SP, offset));
 518         } else if (!do_size) {
 519           st->print(LDR_DOUBLE "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(src_first),offset);
 520         }
 521       } else {
 522         src_first     = OptoReg::Name(R_mem_copy_lo_num);
 523         src_first_rc  = rc_float;
 524         if (cbuf) {
 525           __ ldr_float(Rmemcopy, Address(SP, offset));
 526         } else if (!do_size) {
 527           st->print(LDR_FLOAT "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(src_first),offset);
 528         }
 529       }
 530       size += 4;
 531     }
 532   }
 533 
 534   if (src_second_rc == rc_stack && dst_second_rc == rc_stack) {
 535     Unimplemented();
 536   }
 537 
 538   // --------------------------------------
 539   // Check for integer reg-reg copy
 540   if (src_first_rc == rc_int && dst_first_rc == rc_int) {
 541     // Else normal reg-reg copy
 542     assert( src_second != dst_first, "smashed second before evacuating it" );
 543     if (cbuf) {
 544       __ mov(reg_to_register_object(Matcher::_regEncode[dst_first]), reg_to_register_object(Matcher::_regEncode[src_first]));
 545 #ifndef PRODUCT
 546     } else if (!do_size) {
 547       st->print("MOV    R_%s, R_%s\t# spill",
 548                 Matcher::regName[dst_first],
 549                 Matcher::regName[src_first]);
 550 #endif
 551     }
 552 #ifdef AARCH64
 553     if (src_first+1 == src_second && dst_first+1 == dst_second) {
 554       return size + 4;
 555     }
 556 #endif
 557     size += 4;
 558   }
 559 
 560   // Check for integer store
 561   if (src_first_rc == rc_int && dst_first_rc == rc_stack) {
 562     int offset = ra_->reg2offset(dst_first);
 563     if (cbuf && !is_memoryI(offset)) {
 564       ra_->C->record_method_not_compilable("unable to handle large constant offsets");
 565       return 0;
 566     } else {
 567       if (src_second_rc != rc_bad && is_iRegLd_memhd(src_first, src_second, offset)) {
 568         assert((src_first&1)==0 && src_first+1 == src_second, "pair of registers must be aligned/contiguous");
 569         if (cbuf) {
 570           __ str_64(reg_to_register_object(Matcher::_regEncode[src_first]), Address(SP, offset));
 571 #ifndef PRODUCT
 572         } else if (!do_size) {
 573           if (size != 0) st->print("\n\t");
 574           st->print(STR_64 "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(src_first), offset);
 575 #endif
 576         }
 577         return size + 4;
 578       } else {
 579         if (cbuf) {
 580           __ str_32(reg_to_register_object(Matcher::_regEncode[src_first]), Address(SP, offset));
 581 #ifndef PRODUCT
 582         } else if (!do_size) {
 583           if (size != 0) st->print("\n\t");
 584           st->print(STR_32 "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(src_first), offset);
 585 #endif
 586         }
 587       }
 588     }
 589     size += 4;
 590   }
 591 
 592   // Check for integer load
 593   if (dst_first_rc == rc_int && src_first_rc == rc_stack) {
 594     int offset = ra_->reg2offset(src_first);
 595     if (cbuf && !is_memoryI(offset)) {
 596       ra_->C->record_method_not_compilable("unable to handle large constant offsets");
 597       return 0;
 598     } else {
 599       if (src_second_rc != rc_bad && is_iRegLd_memhd(dst_first, dst_second, offset)) {
 600         assert((src_first&1)==0 && src_first+1 == src_second, "pair of registers must be aligned/contiguous");
 601         if (cbuf) {
 602           __ ldr_64(reg_to_register_object(Matcher::_regEncode[dst_first]), Address(SP, offset));
 603 #ifndef PRODUCT
 604         } else if (!do_size) {
 605           if (size != 0) st->print("\n\t");
 606           st->print(LDR_64 "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(dst_first), offset);
 607 #endif
 608         }
 609         return size + 4;
 610       } else {
 611         if (cbuf) {
 612           __ ldr_32(reg_to_register_object(Matcher::_regEncode[dst_first]), Address(SP, offset));
 613 #ifndef PRODUCT
 614         } else if (!do_size) {
 615           if (size != 0) st->print("\n\t");
 616           st->print(LDR_32 "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(dst_first), offset);
 617 #endif
 618         }
 619       }
 620     }
 621     size += 4;
 622   }
 623 
 624   // Check for float reg-reg copy
 625   if (src_first_rc == rc_float && dst_first_rc == rc_float) {
 626     if (src_second_rc != rc_bad) {
 627       assert((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second, "pairs of registers must be aligned/contiguous");
 628       if (cbuf) {
 629       __ mov_double(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), reg_to_FloatRegister_object(Matcher::_regEncode[src_first]));
 630 #ifndef PRODUCT
 631       } else if (!do_size) {
 632         st->print(MOV_DOUBLE "    R_%s, R_%s\t# spill",
 633                   Matcher::regName[dst_first],
 634                   Matcher::regName[src_first]);
 635 #endif
 636       }
 637       return 4;
 638     }
 639     if (cbuf) {
 640       __ mov_float(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), reg_to_FloatRegister_object(Matcher::_regEncode[src_first]));
 641 #ifndef PRODUCT
 642     } else if (!do_size) {
 643       st->print(MOV_FLOAT "    R_%s, R_%s\t# spill",
 644                 Matcher::regName[dst_first],
 645                 Matcher::regName[src_first]);
 646 #endif
 647     }
 648     size = 4;
 649   }
 650 
 651   // Check for float store
 652   if (src_first_rc == rc_float && dst_first_rc == rc_stack) {
 653     int offset = ra_->reg2offset(dst_first);
 654     if (cbuf && !is_memoryfp(offset)) {
 655       ra_->C->record_method_not_compilable("unable to handle large constant offsets");
 656       return 0;
 657     } else {
 658       // Further check for aligned-adjacent pair, so we can use a double store
 659       if (src_second_rc != rc_bad) {
 660         assert((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second, "pairs of registers and stack slots must be aligned/contiguous");
 661         if (cbuf) {
 662           __ str_double(reg_to_FloatRegister_object(Matcher::_regEncode[src_first]), Address(SP, offset));
 663 #ifndef PRODUCT
 664         } else if (!do_size) {
 665           if (size != 0) st->print("\n\t");
 666           st->print(STR_DOUBLE "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(src_first),offset);
 667 #endif
 668         }
 669         return size + 4;
 670       } else {
 671         if (cbuf) {
 672           __ str_float(reg_to_FloatRegister_object(Matcher::_regEncode[src_first]), Address(SP, offset));
 673 #ifndef PRODUCT
 674         } else if (!do_size) {
 675           if (size != 0) st->print("\n\t");
 676           st->print(STR_FLOAT "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(src_first),offset);
 677 #endif
 678         }
 679       }
 680     }
 681     size += 4;
 682   }
 683 
 684   // Check for float load
 685   if (dst_first_rc == rc_float && src_first_rc == rc_stack) {
 686     int offset = ra_->reg2offset(src_first);
 687     if (cbuf && !is_memoryfp(offset)) {
 688       ra_->C->record_method_not_compilable("unable to handle large constant offsets");
 689       return 0;
 690     } else {
 691       // Further check for aligned-adjacent pair, so we can use a double store
 692       if (src_second_rc != rc_bad) {
 693         assert((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second, "pairs of registers and stack slots must be aligned/contiguous");
 694         if (cbuf) {
 695           __ ldr_double(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), Address(SP, offset));
 696 #ifndef PRODUCT
 697         } else if (!do_size) {
 698           if (size != 0) st->print("\n\t");
 699           st->print(LDR_DOUBLE "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(dst_first),offset);
 700 #endif
 701         }
 702         return size + 4;
 703       } else {
 704         if (cbuf) {
 705           __ ldr_float(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), Address(SP, offset));
 706 #ifndef PRODUCT
 707         } else if (!do_size) {
 708           if (size != 0) st->print("\n\t");
 709           st->print(LDR_FLOAT "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(dst_first),offset);
 710 #endif
 711         }
 712       }
 713     }
 714     size += 4;
 715   }
 716 
 717   // check for int reg -> float reg move
 718   if (src_first_rc == rc_int && dst_first_rc == rc_float) {
 719     // Further check for aligned-adjacent pair, so we can use a single instruction
 720     if (src_second_rc != rc_bad) {
 721       assert((dst_first&1)==0 && dst_first+1 == dst_second, "pairs of registers must be aligned/contiguous");
 722       assert((src_first&1)==0 && src_first+1 == src_second, "pairs of registers must be aligned/contiguous");
 723       assert(src_second_rc == rc_int && dst_second_rc == rc_float, "unsupported");
 724       if (cbuf) {
 725 #ifdef AARCH64
 726         __ fmov_dx(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), reg_to_register_object(Matcher::_regEncode[src_first]));
 727 #else
 728         __ fmdrr(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), reg_to_register_object(Matcher::_regEncode[src_first]), reg_to_register_object(Matcher::_regEncode[src_second]));
 729 #endif
 730 #ifndef PRODUCT
 731       } else if (!do_size) {
 732         if (size != 0) st->print("\n\t");
 733 #ifdef AARCH64
 734         st->print("FMOV_DX   R_%s, R_%s\t! spill",OptoReg::regname(dst_first), OptoReg::regname(src_first));
 735 #else
 736         st->print("FMDRR   R_%s, R_%s, R_%s\t! spill",OptoReg::regname(dst_first), OptoReg::regname(src_first), OptoReg::regname(src_second));
 737 #endif
 738 #endif
 739       }
 740       return size + 4;
 741     } else {
 742       if (cbuf) {
 743         __ fmsr(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), reg_to_register_object(Matcher::_regEncode[src_first]));
 744 #ifndef PRODUCT
 745       } else if (!do_size) {
 746         if (size != 0) st->print("\n\t");
 747         st->print(FMSR "   R_%s, R_%s\t! spill",OptoReg::regname(dst_first), OptoReg::regname(src_first));
 748 #endif
 749       }
 750       size += 4;
 751     }
 752   }
 753 
 754   // check for float reg -> int reg move
 755   if (src_first_rc == rc_float && dst_first_rc == rc_int) {
 756     // Further check for aligned-adjacent pair, so we can use a single instruction
 757     if (src_second_rc != rc_bad) {
 758       assert((src_first&1)==0 && src_first+1 == src_second, "pairs of registers must be aligned/contiguous");
 759       assert((dst_first&1)==0 && dst_first+1 == dst_second, "pairs of registers must be aligned/contiguous");
 760       assert(src_second_rc == rc_float && dst_second_rc == rc_int, "unsupported");
 761       if (cbuf) {
 762 #ifdef AARCH64
 763         __ fmov_xd(reg_to_register_object(Matcher::_regEncode[dst_first]), reg_to_FloatRegister_object(Matcher::_regEncode[src_first]));
 764 #else
 765         __ fmrrd(reg_to_register_object(Matcher::_regEncode[dst_first]), reg_to_register_object(Matcher::_regEncode[dst_second]), reg_to_FloatRegister_object(Matcher::_regEncode[src_first]));
 766 #endif
 767 #ifndef PRODUCT
 768       } else if (!do_size) {
 769         if (size != 0) st->print("\n\t");
 770 #ifdef AARCH64
 771         st->print("FMOV_XD R_%s, R_%s\t! spill",OptoReg::regname(dst_first), OptoReg::regname(src_first));
 772 #else
 773         st->print("FMRRD   R_%s, R_%s, R_%s\t! spill",OptoReg::regname(dst_first), OptoReg::regname(dst_second), OptoReg::regname(src_first));
 774 #endif
 775 #endif
 776       }
 777       return size + 4;
 778     } else {
 779       if (cbuf) {
 780         __ fmrs(reg_to_register_object(Matcher::_regEncode[dst_first]), reg_to_FloatRegister_object(Matcher::_regEncode[src_first]));
 781 #ifndef PRODUCT
 782       } else if (!do_size) {
 783         if (size != 0) st->print("\n\t");
 784         st->print(FMRS "   R_%s, R_%s\t! spill",OptoReg::regname(dst_first), OptoReg::regname(src_first));
 785 #endif
 786       }
 787       size += 4;
 788     }
 789   }
 790 
 791   // --------------------------------------------------------------------
 792   // Check for hi bits still needing moving.  Only happens for misaligned
 793   // arguments to native calls.
 794   if (src_second == dst_second)
 795     return size;               // Self copy; no move
 796   assert( src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad" );
 797 
 798 #ifndef AARCH64
 799   // Check for integer reg-reg copy.  Hi bits are stuck up in the top
 800   // 32-bits of a 64-bit register, but are needed in low bits of another
 801   // register (else it's a hi-bits-to-hi-bits copy which should have
 802   // happened already as part of a 64-bit move)
 803   if (src_second_rc == rc_int && dst_second_rc == rc_int) {
 804     if (cbuf) {
 805       __ mov(reg_to_register_object(Matcher::_regEncode[dst_second]), reg_to_register_object(Matcher::_regEncode[src_second]));
 806 #ifndef PRODUCT
 807     } else if (!do_size) {
 808       if (size != 0) st->print("\n\t");
 809       st->print("MOV    R_%s, R_%s\t# spill high",
 810                 Matcher::regName[dst_second],
 811                 Matcher::regName[src_second]);
 812 #endif
 813     }
 814     return size+4;
 815   }
 816 
 817   // Check for high word integer store
 818   if (src_second_rc == rc_int && dst_second_rc == rc_stack) {
 819     int offset = ra_->reg2offset(dst_second);
 820 
 821     if (cbuf && !is_memoryP(offset)) {
 822       ra_->C->record_method_not_compilable("unable to handle large constant offsets");
 823       return 0;
 824     } else {
 825       if (cbuf) {
 826         __ str(reg_to_register_object(Matcher::_regEncode[src_second]), Address(SP, offset));
 827 #ifndef PRODUCT
 828       } else if (!do_size) {
 829         if (size != 0) st->print("\n\t");
 830         st->print("STR   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(src_second), offset);
 831 #endif
 832       }
 833     }
 834     return size + 4;
 835   }
 836 
 837   // Check for high word integer load
 838   if (dst_second_rc == rc_int && src_second_rc == rc_stack) {
 839     int offset = ra_->reg2offset(src_second);
 840     if (cbuf && !is_memoryP(offset)) {
 841       ra_->C->record_method_not_compilable("unable to handle large constant offsets");
 842       return 0;
 843     } else {
 844       if (cbuf) {
 845         __ ldr(reg_to_register_object(Matcher::_regEncode[dst_second]), Address(SP, offset));
 846 #ifndef PRODUCT
 847       } else if (!do_size) {
 848         if (size != 0) st->print("\n\t");
 849         st->print("LDR   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(dst_second), offset);
 850 #endif
 851       }
 852     }
 853     return size + 4;
 854   }
 855 #endif
 856 
 857   Unimplemented();
 858   return 0; // Mute compiler
 859 }
 860 
#ifndef PRODUCT
// Debug-only pretty-printer: runs implementation() in "format" mode
// (no code buffer), printing the spill-copy sequence to 'st'.
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  implementation( NULL, ra_, false, st );
}
#endif

// Emit the machine code for this spill copy into the code buffer.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}

// Size in bytes of the emitted spill copy, computed by a sizing-only
// (do_size) pass of implementation() without emitting anything.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
 874 
 875 //=============================================================================
 876 #ifndef PRODUCT
 877 void MachNopNode::format( PhaseRegAlloc *, outputStream *st ) const {
 878   st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
 879 }
 880 #endif
 881 
 882 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
 883   MacroAssembler _masm(&cbuf);
 884   for(int i = 0; i < _count; i += 1) {
 885     __ nop();
 886   }
 887 }
 888 
// Total padding size in bytes: _count nops, 4 bytes each.
uint MachNopNode::size(PhaseRegAlloc *ra_) const {
  return 4 * _count;
}
 892 
 893 
 894 //=============================================================================
 895 #ifndef PRODUCT
 896 void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
 897   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 898   int reg = ra_->get_reg_first(this);
 899   st->print("ADD    %s,R_SP+#%d",Matcher::regName[reg], offset);
 900 }
 901 #endif
 902 
 903 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
 904   MacroAssembler _masm(&cbuf);
 905   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 906   int reg = ra_->get_encode(this);
 907   Register dst = reg_to_register_object(reg);
 908 
 909   if (is_aimm(offset)) {
 910     __ add(dst, SP, offset);
 911   } else {
 912     __ mov_slow(dst, offset);
 913 #ifdef AARCH64
 914     __ add(dst, SP, dst, ex_lsl);
 915 #else
 916     __ add(dst, SP, dst);
 917 #endif
 918   }
 919 }
 920 
 921 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
 922   // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_)
 923   assert(ra_ == ra_->C->regalloc(), "sanity");
 924   return ra_->C->scratch_emit_size(this);
 925 }
 926 
 927 //=============================================================================
 928 #ifndef PRODUCT
 929 #ifdef AARCH64
 930 #define R_RTEMP "R_R16"
 931 #else
 932 #define R_RTEMP "R_R12"
 933 #endif
 934 void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
 935   st->print_cr("\nUEP:");
 936   if (UseCompressedClassPointers) {
 937     st->print_cr("\tLDR_w " R_RTEMP ",[R_R0 + oopDesc::klass_offset_in_bytes]\t! Inline cache check");
 938     st->print_cr("\tdecode_klass " R_RTEMP);
 939   } else {
 940     st->print_cr("\tLDR   " R_RTEMP ",[R_R0 + oopDesc::klass_offset_in_bytes]\t! Inline cache check");
 941   }
 942   st->print_cr("\tCMP   " R_RTEMP ",R_R8" );
 943   st->print   ("\tB.NE  SharedRuntime::handle_ic_miss_stub");
 944 }
 945 #endif
 946 
 947 void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
 948   MacroAssembler _masm(&cbuf);
 949   Register iCache  = reg_to_register_object(Matcher::inline_cache_reg_encode());
 950   assert(iCache == Ricklass, "should be");
 951   Register receiver = R0;
 952 
 953   __ load_klass(Rtemp, receiver);
 954   __ cmp(Rtemp, iCache);
 955 #ifdef AARCH64
 956   Label match;
 957   __ b(match, eq);
 958   __ jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type, Rtemp);
 959   __ bind(match);
 960 #else
 961   __ jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type, noreg, ne);
 962 #endif
 963 }
 964 
 965 uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
 966   return MachNode::size(ra_);
 967 }
 968 
 969 
 970 //=============================================================================
 971 
 972 // Emit exception handler code.
 973 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
 974   MacroAssembler _masm(&cbuf);
 975 
 976   address base = __ start_a_stub(size_exception_handler());
 977   if (base == NULL) {
 978     ciEnv::current()->record_failure("CodeCache is full");
 979     return 0;  // CodeBuffer::expand failed
 980   }
 981 
 982   int offset = __ offset();
 983 
 984   // OK to trash LR, because exception blob will kill it
 985   __ jump(OptoRuntime::exception_blob()->entry_point(), relocInfo::runtime_call_type, LR_tmp);
 986 
 987   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
 988 
 989   __ end_a_stub();
 990 
 991   return offset;
 992 }
 993 
// Emit the deoptimization handler stub: record the deopt PC on the stack
// and jump to the deopt blob's unpack entry.  Returns the handler offset,
// or 0 if the code cache is full.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
  // Can't use any of the current frame's registers as we may have deopted
  // at a poll and everything can be live.
  MacroAssembler _masm(&cbuf);

  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }

  int offset = __ offset();
  address deopt_pc = __ pc();

#ifdef AARCH64
  // See LR saved by caller in sharedRuntime_arm.cpp
  // see also hse1 ws
  // see also LIR_Assembler::emit_deopt_handler

  __ raw_push(LR, LR); // preserve LR in both slots
  __ mov_relative_address(LR, deopt_pc);
  __ str(LR, Address(SP, 1 * wordSize)); // save deopt PC
  // OK to kill LR, because deopt blob will restore it from SP[0]
  __ jump(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type, LR_tmp);
#else
  // 32-bit layout: SP[1] = deopt PC, SP[0] untouched; LR is preserved
  // by saving/restoring it around the relative-address materialization.
  __ sub(SP, SP, wordSize); // make room for saved PC
  __ push(LR); // save LR that may be live when we get here
  __ mov_relative_address(LR, deopt_pc);
  __ str(LR, Address(SP, wordSize)); // save deopt PC
  __ pop(LR); // restore LR
  __ jump(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type, noreg);
#endif

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");

  __ end_a_stub();
  return offset;
}
1032 
// Identify whether this platform supports the ideal-graph opcode.
// Rules not listed here are supported whenever a match rule exists;
// vector and popcount rules are further gated on CPU features / flags.
const bool Matcher::match_rule_supported(int opcode) {
  if (!has_match_rule(opcode))
    return false;

  switch (opcode) {
  case Op_PopCountI:
  case Op_PopCountL:
    if (!UsePopCountInstruction)
      return false;
    break;
  // Integer vector ops require NEON/SIMD.
  case Op_LShiftCntV:
  case Op_RShiftCntV:
  case Op_AddVB:
  case Op_AddVS:
  case Op_AddVI:
  case Op_AddVL:
  case Op_SubVB:
  case Op_SubVS:
  case Op_SubVI:
  case Op_SubVL:
  case Op_MulVS:
  case Op_MulVI:
  case Op_LShiftVB:
  case Op_LShiftVS:
  case Op_LShiftVI:
  case Op_LShiftVL:
  case Op_RShiftVB:
  case Op_RShiftVS:
  case Op_RShiftVI:
  case Op_RShiftVL:
  case Op_URShiftVB:
  case Op_URShiftVS:
  case Op_URShiftVI:
  case Op_URShiftVL:
  case Op_AndV:
  case Op_OrV:
  case Op_XorV:
    return VM_Version::has_simd();
  // Single-precision float vectors: SIMD on AArch64; VFP or SIMD on ARM32.
  case Op_LoadVector:
  case Op_StoreVector:
  case Op_AddVF:
  case Op_SubVF:
  case Op_MulVF:
#ifdef AARCH64
    return VM_Version::has_simd();
#else
    return VM_Version::has_vfp() || VM_Version::has_simd();
#endif
  // Double-precision / division vectors: SIMD on AArch64; VFP only on ARM32.
  case Op_AddVD:
  case Op_SubVD:
  case Op_MulVD:
  case Op_DivVF:
  case Op_DivVD:
#ifdef AARCH64
    return VM_Version::has_simd();
#else
    return VM_Version::has_vfp();
#endif
  }

  return true;  // Per default match rules are supported.
}
1095 
1096 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
1097 
1098   // TODO
1099   // identify extra cases that we might want to provide match rules for
1100   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
1101   bool ret_value = match_rule_supported(opcode);
1102   // Add rules here.
1103 
1104   return ret_value;  // Per default match rules are supported.
1105 }
1106 
// ARM has no predicated (masked) vector support.
const bool Matcher::has_predicated_vectors(void) {
  return false;
}

// No adjustment to the register-pressure threshold for floats.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}

int Matcher::regnum_to_fpu_offset(int regnum) {
  return regnum - 32; // The FP registers are in the second chunk
}
1118 
// Vector width in bytes
// Uniform for all element types; bounded by the MaxVectorSize flag.
const int Matcher::vector_width_in_bytes(BasicType bt) {
  return MaxVectorSize;
}
1123 
1124 // Vector ideal reg corresponding to specified size in bytes
1125 const uint Matcher::vector_ideal_reg(int size) {
1126   assert(MaxVectorSize >= size, "");
1127   switch(size) {
1128     case  8: return Op_VecD;
1129     case 16: return Op_VecX;
1130   }
1131   ShouldNotReachHere();
1132   return 0;
1133 }
1134 
// Shift counts live in the same register class as the vectors they shift.
const uint Matcher::vector_shift_count_ideal_reg(int size) {
  return vector_ideal_reg(size);
}

// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  assert(is_java_primitive(bt), "only primitive type vectors");
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}

// Minimum vector: as many elements of 'bt' as fit in 8 bytes (one VecD).
const int Matcher::min_vector_size(const BasicType bt) {
  assert(is_java_primitive(bt), "only primitive type vectors");
  return 8/type2aelembytes(bt);
}
1149 
// ARM doesn't support misaligned vectors store/load.
const bool Matcher::misaligned_vectors_ok() {
  return false;
}

// ARM doesn't support AES intrinsics
const bool Matcher::pass_original_key_for_aes() {
  return false;
}

// long-to-float conversion: directly supported on AArch64 only.
const bool Matcher::convL2FSupported(void) {
#ifdef AARCH64
  return true;
#else
  return false;
#endif
}
1167 
// Is this branch offset short enough that a short branch can be used?
//
// NOTE: If the platform does not provide any short branch variants, then
//       this method should return false for offset 0.
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
  // The passed offset is relative to address of the branch.
  // On ARM a branch displacement is calculated relative to address
  // of the branch + 8.
  //
  // offset -= 8;
  // return (Assembler::is_simm24(offset));
  // Short branches are disabled here; the commented code above shows how
  // they could be enabled using the simm24 branch displacement range.
  return false;
}

const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // On AArch64 only zero is cheap (single instruction via XZR).
#ifdef AARCH64
  return (value == 0);
#else
  return false;
#endif
}
1190 
// No scaling for the parameter of the ClearArray node.
const bool Matcher::init_array_count_is_in_bytes = true;

#ifdef AARCH64
const int Matcher::long_cmove_cost() { return 1; }
#else
// Needs 2 CMOV's for longs.
const int Matcher::long_cmove_cost() { return 2; }
#endif

#ifdef AARCH64
const int Matcher::float_cmove_cost() { return 1; }
#else
// CMOVF/CMOVD are expensive on ARM.
const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }
#endif

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// FIXME: does this handle vector shifts as well?
#ifdef AARCH64
const bool Matcher::need_masked_shift_count = false;
#else
const bool Matcher::need_masked_shift_count = true;
#endif

// Require an explicit ConvI2L type on int-to-long conversions.
const bool Matcher::convi2l_type_required = true;
1221 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  return clone_base_plus_offset_address(m, mstack, address_visited);
}

// No platform-specific address reshaping on ARM.
void Compile::reshape_address(AddPNode* addp) {
}

bool Matcher::narrow_oop_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedOops, "only for compressed oops code");
  return false;
}
1237 
bool Matcher::narrow_klass_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedClassPointers, "only for compressed klass code");
  return false;
}

// Prefer decoding a narrow oop constant over loading the wide form.
bool Matcher::const_oop_prefer_decode() {
  NOT_LP64(ShouldNotCallThis());
  return true;
}

// Prefer decoding a narrow klass constant over loading the wide form.
bool Matcher::const_klass_prefer_decode() {
  NOT_LP64(ShouldNotCallThis());
  return true;
}
1253 
// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers.  Most RISCs will have to materialize an address into a
// register first, so they would do better to copy the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no fixup is
// needed.  Else we split the double into 2 integer pieces and move it
// piece-by-piece.  Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
#ifdef AARCH64
// On stack replacement support:
// We don't need Load[DL]_unaligned support, because interpreter stack
// has correct alignment
const bool Matcher::misaligned_doubles_ok = true;
#else
const bool Matcher::misaligned_doubles_ok = false;
#endif

// No-op on ARM.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
}

// Advertise here if the CPU requires explicit rounding operations
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during deoptimization?
// ARM does not handle callee-save floats.
bool Matcher::float_in_double() {
  return false;
}

// Do ints take an entire long register or just half?
// Note that we if-def off of _LP64.
// The relevant question is how the int is callee-saved.  In _LP64
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits, in not-_LP64 only the low 32 bits is written.
#ifdef _LP64
const bool Matcher::int_in_long = true;
#else
const bool Matcher::int_in_long = false;
#endif
1297 
// Return whether or not this register is ever used as an argument.  This
// function is used on startup to build the trampoline stubs in generateOptoStub.
// Registers not mentioned will be killed by the VM call in the trampoline, and
// arguments in those registers not be available to the callee.
bool Matcher::can_be_java_arg( int reg ) {
#ifdef AARCH64
  // Integer args in R0..R7; FP args in V0..V7 (low two slots of each).
  if (reg >= R_R0_num && reg < R_R8_num) return true;
  if (reg >= R_V0_num && reg <= R_V7b_num && ((reg & 3) < 2)) return true;
#else
  // Integer args in R0..R3; FP args in S0..S13.
  if (reg == R_R0_num ||
      reg == R_R1_num ||
      reg == R_R2_num ||
      reg == R_R3_num) return true;

  if (reg >= R_S0_num &&
      reg <= R_S13_num) return true;
#endif
  return false;
}

bool Matcher::is_spillable_arg( int reg ) {
  return can_be_java_arg(reg);
}

// Never use inline assembly for long division by constant on ARM.
bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
  return false;
}
1325 
// The div/mod projection masks below are unused on ARM (no fused
// divmod instruction pair is matched), hence ShouldNotReachHere().

// Register for DIVI projection of divmodI
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// SP is saved in FP across method-handle invokes.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REGP_mask();
}
1353 
// True if the call target may be out of range of a direct branch from the
// code cache, requiring a far-call sequence.
bool maybe_far_call(const CallNode *n) {
  return !MacroAssembler::_reachable_from_cache(n->as_Call()->entry_point());
}

// Overload for mach call nodes; same reachability test.
bool maybe_far_call(const MachCallNode *n) {
  return !MacroAssembler::_reachable_from_cache(n->as_MachCall()->entry_point());
}
1361 
1362 %}
1363 
1364 //----------ENCODING BLOCK-----------------------------------------------------
1365 // This block specifies the encoding classes used by the compiler to output
1366 // byte streams.  Encoding classes are parameterized macros used by
1367 // Machine Instruction Nodes in order to generate the bit encoding of the
1368 // instruction.  Operands specify their base encoding interface with the
// interface keyword.  Four interfaces are currently supported:
1370 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER.  REG_INTER causes an
1371 // operand to generate a function which returns its register number when
1372 // queried.   CONST_INTER causes an operand to generate a function which
1373 // returns the value of the constant when queried.  MEMORY_INTER causes an
1374 // operand to generate four functions which return the Base Register, the
1375 // Index Register, the Scale Value, and the Offset Value of the operand when
1376 // queried.  COND_INTER causes an operand to generate six functions which
1377 // return the encoding code (ie - encoding bits for the instruction)
1378 // associated with each basic boolean condition for a conditional instruction.
1379 //
1380 // Instructions specify two basic values for encoding.  Again, a function
1381 // is available to check if the constant displacement is an oop. They use the
1382 // ins_encode keyword to specify their encoding classes (which must be
1383 // a sequence of enc_class names, and their parameters, specified in
1384 // the encoding block), and they use the
1385 // opcode keyword to specify, in order, their primary, secondary, and
1386 // tertiary opcode.  Only the opcode sections which a particular instruction
1387 // needs for encoding need to be specified.
1388 encode %{
  // No epilogue code is needed after calls on ARM.
  enc_class call_epilog %{
    // nothing
  %}

  enc_class Java_To_Runtime (method meth) %{
    // CALL directly to the runtime
    emit_call_reloc(cbuf, as_MachCall(), $meth, runtime_call_Relocation::spec());
  %}
1397 
  enc_class Java_Static_Call (method meth) %{
    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
    // who we intended to call.

    if ( !_method) {
      // Runtime call (no Java method): plain runtime-call relocation.
      emit_call_reloc(cbuf, as_MachCall(), $meth, runtime_call_Relocation::spec());
    } else {
      // Java call: pick opt-virtual vs. static relocation per call kind.
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      emit_call_reloc(cbuf, as_MachCall(), $meth, rspec);

      // Emit stubs for static call.
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
  %}
1418 
  // Record the return address of the upcoming call in the thread's
  // last_Java_pc slot, without disturbing the instruction mark.
  enc_class save_last_PC %{
    // preserve mark
    address mark = cbuf.insts()->mark();
    debug_only(int off0 = cbuf.insts_size());
    MacroAssembler _masm(&cbuf);
    int ret_addr_offset = as_MachCall()->ret_addr_offset();
    __ adr(LR, mark + ret_addr_offset);
    __ str(LR, Address(Rthread, JavaThread::last_Java_pc_offset()));
    debug_only(int off1 = cbuf.insts_size());
    assert(off1 - off0 == 2 * Assembler::InstructionSize, "correct size prediction");
    // restore mark
    cbuf.insts()->set_mark(mark);
  %}

  // Save SP into Rmh_SP_save before a call that may modify SP.
  enc_class preserve_SP %{
    // preserve mark
    address mark = cbuf.insts()->mark();
    debug_only(int off0 = cbuf.insts_size());
    MacroAssembler _masm(&cbuf);
    // FP is preserved across all calls, even compiled calls.
    // Use it to preserve SP in places where the callee might change the SP.
    __ mov(Rmh_SP_save, SP);
    debug_only(int off1 = cbuf.insts_size());
    assert(off1 - off0 == 4, "correct size prediction");
    // restore mark
    cbuf.insts()->set_mark(mark);
  %}

  // Restore SP from the value saved by preserve_SP.
  enc_class restore_SP %{
    MacroAssembler _masm(&cbuf);
    __ mov(SP, Rmh_SP_save);
  %}
1451 
  // Inline-cache (virtual) call: load the IC placeholder (non_oop_word)
  // into the IC register with a fixed-size sequence so the IC code can
  // later patch it, then emit the relocated call.
  enc_class Java_Dynamic_Call (method meth) %{
    MacroAssembler _masm(&cbuf);
    Register R8_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
    assert(R8_ic_reg == Ricklass, "should be");
    __ set_inst_mark();
#ifdef AARCH64
// TODO: see C1 LIR_Assembler::ic_call()
    InlinedAddress oop_literal((address)Universe::non_oop_word());
    int offset = __ offset();
    int fixed_size = mov_oop_size * 4;
    if (VM_Version::prefer_moves_over_load_literal()) {
      // Build the 64-bit placeholder with a movz/movk quartet.
      uintptr_t val = (uintptr_t)Universe::non_oop_word();
      __ movz(R8_ic_reg, (val >>  0) & 0xffff,  0);
      __ movk(R8_ic_reg, (val >> 16) & 0xffff, 16);
      __ movk(R8_ic_reg, (val >> 32) & 0xffff, 32);
      __ movk(R8_ic_reg, (val >> 48) & 0xffff, 48);
    } else {
      __ ldr_literal(R8_ic_reg, oop_literal);
    }
    assert(__ offset() - offset == fixed_size, "bad mov_oop size");
#else
    __ movw(R8_ic_reg, ((unsigned int)Universe::non_oop_word()) & 0xffff);
    __ movt(R8_ic_reg, ((unsigned int)Universe::non_oop_word()) >> 16);
#endif
    address  virtual_call_oop_addr = __ inst_mark();
    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
    // who we intended to call.
    int method_index = resolved_method_index(cbuf);
    __ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr, method_index));
    emit_call_reloc(cbuf, as_MachCall(), $meth, RelocationHolder::none);
#ifdef AARCH64
    if (!VM_Version::prefer_moves_over_load_literal()) {
      // Bind the literal pool entry after the call, padded to worst-case size.
      Label skip_literal;
      __ b(skip_literal);
      int off2 = __ offset();
      __ bind_literal(oop_literal);
      if (__ offset() - off2 == wordSize) {
        // no padding, so insert nop for worst-case sizing
        __ nop();
      }
      __ bind(skip_literal);
    }
#endif
  %}
1496 
  enc_class LdReplImmI(immI src, regD dst, iRegI tmp, int cnt, int wth) %{
    // FIXME: load from constant table?
    // Load a constant replicated "count" times with width "width"
    int count = $cnt$$constant;
    int width = $wth$$constant;
    assert(count*width == 4, "sanity");
    int val = $src$$constant;
    if (width < 4) {
      // Replicate the sub-word value across all 32 bits of 'val'.
      int bit_width = width * 8;
      val &= (((int)1) << bit_width) - 1; // mask off sign bits
      for (int i = 0; i < count - 1; i++) {
        val |= (val << bit_width);
      }
    }
    MacroAssembler _masm(&cbuf);

    // Materialize 'val' into tmp with the cheapest sequence, then copy
    // it into both halves of the double register.
    if (val == -1) {
      __ mvn($tmp$$Register, 0);
    } else if (val == 0) {
      __ mov($tmp$$Register, 0);
    } else {
      __ movw($tmp$$Register, val & 0xffff);
      __ movt($tmp$$Register, (unsigned int)val >> 16);
    }
    __ fmdrr($dst$$FloatRegister, $tmp$$Register, $tmp$$Register);
  %}
1523 
  enc_class LdReplImmF(immF src, regD dst, iRegI tmp) %{
    // Replicate float con 2 times and pack into vector (8 bytes) in regD.
    float fval = $src$$constant;
    // NOTE(review): pointer type-pun reinterprets the float bits as int;
    // this violates strict aliasing — consider jint_cast/memcpy. TODO confirm.
    int val = *((int*)&fval);
    MacroAssembler _masm(&cbuf);

    // Materialize the 32-bit pattern in tmp, then duplicate into both
    // halves of the double register.
    if (val == -1) {
      __ mvn($tmp$$Register, 0);
    } else if (val == 0) {
      __ mov($tmp$$Register, 0);
    } else {
      __ movw($tmp$$Register, val & 0xffff);
      __ movt($tmp$$Register, (unsigned int)val >> 16);
    }
    __ fmdrr($dst$$FloatRegister, $tmp$$Register, $tmp$$Register);
  %}
1540 
  // Lexicographic comparison of two UTF-16 strings: result < 0, == 0, > 0.
  // Compares up to min(cnt1, cnt2) chars; ties are broken by the length
  // difference.  Relies on condition flags carried between instructions.
  enc_class enc_String_Compare(R0RegP str1, R1RegP str2, R2RegI cnt1, R3RegI cnt2, iRegI result, iRegI tmp1, iRegI tmp2) %{
    Label Ldone, Lloop;
    MacroAssembler _masm(&cbuf);

    Register   str1_reg = $str1$$Register;
    Register   str2_reg = $str2$$Register;
    Register   cnt1_reg = $cnt1$$Register; // int
    Register   cnt2_reg = $cnt2$$Register; // int
    Register   tmp1_reg = $tmp1$$Register;
    Register   tmp2_reg = $tmp2$$Register;
    Register result_reg = $result$$Register;

    assert_different_registers(str1_reg, str2_reg, cnt1_reg, cnt2_reg, tmp1_reg, tmp2_reg);

    // Compute the minimum of the string lengths(str1_reg) and the
    // difference of the string lengths (stack)

    // See if the lengths are different, and calculate min in str1_reg.
    // Stash diff in tmp2 in case we need it for a tie-breaker.
    __ subs_32(tmp2_reg, cnt1_reg, cnt2_reg);
#ifdef AARCH64
    Label Lskip;
    __ _lsl_w(cnt1_reg, cnt1_reg, exact_log2(sizeof(jchar))); // scale the limit
    __ b(Lskip, mi);
    __ _lsl_w(cnt1_reg, cnt2_reg, exact_log2(sizeof(jchar))); // scale the limit
    __ bind(Lskip);
#else
    __ mov(cnt1_reg, AsmOperand(cnt1_reg, lsl, exact_log2(sizeof(jchar)))); // scale the limit
    __ mov(cnt1_reg, AsmOperand(cnt2_reg, lsl, exact_log2(sizeof(jchar))), pl); // scale the limit
#endif

    // reallocate cnt1_reg, cnt2_reg, result_reg
    // Note:  limit_reg holds the string length pre-scaled by 2
    Register limit_reg = cnt1_reg;
    Register  chr2_reg = cnt2_reg;
    Register  chr1_reg = tmp1_reg;
    // str{12} are the base pointers

    // Is the minimum length zero?
    __ cmp_32(limit_reg, 0);
    if (result_reg != tmp2_reg) {
      __ mov(result_reg, tmp2_reg, eq);
    }
    __ b(Ldone, eq);

    // Load first characters
    __ ldrh(chr1_reg, Address(str1_reg, 0));
    __ ldrh(chr2_reg, Address(str2_reg, 0));

    // Compare first characters
    __ subs(chr1_reg, chr1_reg, chr2_reg);
    if (result_reg != chr1_reg) {
      __ mov(result_reg, chr1_reg, ne);
    }
    __ b(Ldone, ne);

    {
      // Check after comparing first character to see if strings are equivalent
      // Check if the strings start at same location
      __ cmp(str1_reg, str2_reg);
      // Check if the length difference is zero
      __ cond_cmp(tmp2_reg, 0, eq);
      __ mov(result_reg, 0, eq); // result is zero
      __ b(Ldone, eq);
      // Strings might not be equal
    }

    // Only one char to compare?  Then the length diff decides the result.
    __ subs(chr1_reg, limit_reg, 1 * sizeof(jchar));
    if (result_reg != tmp2_reg) {
      __ mov(result_reg, tmp2_reg, eq);
    }
    __ b(Ldone, eq);

    // Shift str1_reg and str2_reg to the end of the arrays, negate limit
    __ add(str1_reg, str1_reg, limit_reg);
    __ add(str2_reg, str2_reg, limit_reg);
    __ neg(limit_reg, chr1_reg);  // limit = -(limit-2)

    // Compare the rest of the characters
    __ bind(Lloop);
    __ ldrh(chr1_reg, Address(str1_reg, limit_reg));
    __ ldrh(chr2_reg, Address(str2_reg, limit_reg));
    __ subs(chr1_reg, chr1_reg, chr2_reg);
    if (result_reg != chr1_reg) {
      __ mov(result_reg, chr1_reg, ne);
    }
    __ b(Ldone, ne);

    __ adds(limit_reg, limit_reg, sizeof(jchar));
    __ b(Lloop, ne);

    // If strings are equal up to min length, return the length difference.
    if (result_reg != tmp2_reg) {
      __ mov(result_reg, tmp2_reg);
    }

    // Otherwise, return the difference between the first mismatched chars.
    __ bind(Ldone);
  %}
1640 
  // Equality check of two char arrays of equal length 'cnt':
  // result = 1 if equal, 0 otherwise.  Uses a word-wise fast path when
  // both arrays are 4-byte aligned, else a char-by-char loop.
  enc_class enc_String_Equals(R0RegP str1, R1RegP str2, R2RegI cnt, iRegI result, iRegI tmp1, iRegI tmp2) %{
    Label Lchar, Lchar_loop, Ldone, Lequal;
    MacroAssembler _masm(&cbuf);

    Register   str1_reg = $str1$$Register;
    Register   str2_reg = $str2$$Register;
    Register    cnt_reg = $cnt$$Register; // int
    Register   tmp1_reg = $tmp1$$Register;
    Register   tmp2_reg = $tmp2$$Register;
    Register result_reg = $result$$Register;

    assert_different_registers(str1_reg, str2_reg, cnt_reg, tmp1_reg, tmp2_reg, result_reg);

    __ cmp(str1_reg, str2_reg); //same char[] ?
    __ b(Lequal, eq);

    __ cbz_32(cnt_reg, Lequal); // count == 0

    //rename registers
    Register limit_reg = cnt_reg;
    Register  chr1_reg = tmp1_reg;
    Register  chr2_reg = tmp2_reg;

    // Scale char count to a byte count.
    __ logical_shift_left(limit_reg, limit_reg, exact_log2(sizeof(jchar)));

    //check for alignment and position the pointers to the ends
    __ orr(chr1_reg, str1_reg, str2_reg);
    __ tst(chr1_reg, 0x3);

    // notZero means at least one not 4-byte aligned.
    // We could optimize the case when both arrays are not aligned
    // but it is not frequent case and it requires additional checks.
    __ b(Lchar, ne);

    // Compare char[] arrays aligned to 4 bytes.
    __ char_arrays_equals(str1_reg, str2_reg, limit_reg, result_reg,
                          chr1_reg, chr2_reg, Ldone);

    __ b(Lequal); // equal

    // char by char compare
    __ bind(Lchar);
    __ mov(result_reg, 0);
    __ add(str1_reg, limit_reg, str1_reg);
    __ add(str2_reg, limit_reg, str2_reg);
    __ neg(limit_reg, limit_reg); //negate count

    // Lchar_loop
    __ bind(Lchar_loop);
    __ ldrh(chr1_reg, Address(str1_reg, limit_reg));
    __ ldrh(chr2_reg, Address(str2_reg, limit_reg));
    __ cmp(chr1_reg, chr2_reg);
    __ b(Ldone, ne);      // mismatch: result_reg already holds 0
    __ adds(limit_reg, limit_reg, sizeof(jchar));
    __ b(Lchar_loop, ne);

    __ bind(Lequal);
    __ mov(result_reg, 1);  //equal

    __ bind(Ldone);
  %}
1702 
1703   enc_class enc_Array_Equals(R0RegP ary1, R1RegP ary2, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI result) %{
1704     Label Ldone, Lloop, Lequal;
1705     MacroAssembler _masm(&cbuf);
1706 
1707     Register   ary1_reg = $ary1$$Register;
1708     Register   ary2_reg = $ary2$$Register;
1709     Register   tmp1_reg = $tmp1$$Register;
1710     Register   tmp2_reg = $tmp2$$Register;
1711     Register   tmp3_reg = $tmp3$$Register;
1712     Register result_reg = $result$$Register;
1713 
1714     assert_different_registers(ary1_reg, ary2_reg, tmp1_reg, tmp2_reg, tmp3_reg, result_reg);
1715 
1716     int length_offset  = arrayOopDesc::length_offset_in_bytes();
1717     int base_offset    = arrayOopDesc::base_offset_in_bytes(T_CHAR);
1718 
1719     // return true if the same array
1720 #ifdef AARCH64
1721     __ cmp(ary1_reg, ary2_reg);
1722     __ b(Lequal, eq);
1723 
1724     __ mov(result_reg, 0);
1725 
1726     __ cbz(ary1_reg, Ldone); // not equal
1727 
1728     __ cbz(ary2_reg, Ldone); // not equal
1729 #else
1730     __ teq(ary1_reg, ary2_reg);
1731     __ mov(result_reg, 1, eq);
1732     __ b(Ldone, eq); // equal
1733 
1734     __ tst(ary1_reg, ary1_reg);
1735     __ mov(result_reg, 0, eq);
1736     __ b(Ldone, eq);    // not equal
1737 
1738     __ tst(ary2_reg, ary2_reg);
1739     __ mov(result_reg, 0, eq);
1740     __ b(Ldone, eq);    // not equal
1741 #endif
1742 
1743     //load the lengths of arrays
1744     __ ldr_s32(tmp1_reg, Address(ary1_reg, length_offset)); // int
1745     __ ldr_s32(tmp2_reg, Address(ary2_reg, length_offset)); // int
1746 
1747     // return false if the two arrays are not equal length
1748 #ifdef AARCH64
1749     __ cmp_w(tmp1_reg, tmp2_reg);
1750     __ b(Ldone, ne);    // not equal
1751 
1752     __ cbz_w(tmp1_reg, Lequal); // zero-length arrays are equal
1753 #else
1754     __ teq_32(tmp1_reg, tmp2_reg);
1755     __ mov(result_reg, 0, ne);
1756     __ b(Ldone, ne);    // not equal
1757 
1758     __ tst(tmp1_reg, tmp1_reg);
1759     __ mov(result_reg, 1, eq);
1760     __ b(Ldone, eq);    // zero-length arrays are equal
1761 #endif
1762 
1763     // load array addresses
1764     __ add(ary1_reg, ary1_reg, base_offset);
1765     __ add(ary2_reg, ary2_reg, base_offset);
1766 
1767     // renaming registers
1768     Register chr1_reg  =  tmp3_reg;   // for characters in ary1
1769     Register chr2_reg  =  tmp2_reg;   // for characters in ary2
1770     Register limit_reg =  tmp1_reg;   // length
1771 
1772     // set byte count
1773     __ logical_shift_left_32(limit_reg, limit_reg, exact_log2(sizeof(jchar)));
1774 
1775     // Compare char[] arrays aligned to 4 bytes.
1776     __ char_arrays_equals(ary1_reg, ary2_reg, limit_reg, result_reg,
1777                           chr1_reg, chr2_reg, Ldone);
1778     __ bind(Lequal);
1779     __ mov(result_reg, 1);  //equal
1780 
1781     __ bind(Ldone);
1782     %}
1783 %}
1784 
1785 //----------FRAME--------------------------------------------------------------
1786 // Definition of frame structure and management information.
1787 //
1788 //  S T A C K   L A Y O U T    Allocators stack-slot number
1789 //                             |   (to get allocators register number
1790 //  G  Owned by    |        |  v    add VMRegImpl::stack0)
1791 //  r   CALLER     |        |
1792 //  o     |        +--------+      pad to even-align allocators stack-slot
1793 //  w     V        |  pad0  |        numbers; owned by CALLER
1794 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
1795 //  h     ^        |   in   |  5
1796 //        |        |  args  |  4   Holes in incoming args owned by SELF
1797 //  |     |        |        |  3
1798 //  |     |        +--------+
1799 //  V     |        | old out|      Empty on Intel, window on Sparc
1800 //        |    old |preserve|      Must be even aligned.
1801 //        |     SP-+--------+----> Matcher::_old_SP, 8 (or 16 in LP64)-byte aligned
1802 //        |        |   in   |  3   area for Intel ret address
1803 //     Owned by    |preserve|      Empty on Sparc.
1804 //       SELF      +--------+
1805 //        |        |  pad2  |  2   pad to align old SP
1806 //        |        +--------+  1
1807 //        |        | locks  |  0
1808 //        |        +--------+----> VMRegImpl::stack0, 8 (or 16 in LP64)-byte aligned
1809 //        |        |  pad1  | 11   pad to align new SP
1810 //        |        +--------+
1811 //        |        |        | 10
1812 //        |        | spills |  9   spills
1813 //        V        |        |  8   (pad0 slot for callee)
1814 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
1815 //        ^        |  out   |  7
1816 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
1817 //     Owned by    +--------+
1818 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
1819 //        |    new |preserve|      Must be even-aligned.
1820 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
1821 //        |        |        |
1822 //
1823 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
1824 //         known from SELF's arguments and the Java calling convention.
1825 //         Region 6-7 is determined per call site.
1826 // Note 2: If the calling convention leaves holes in the incoming argument
1827 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
//         incoming area, as the Java calling convention is completely under
//         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
1832 //         varargs C calling conventions.
1833 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
1834 //         even aligned with pad0 as needed.
1835 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
1836 //         region 6-11 is even aligned; it may be padded out more so that
1837 //         the region from SP to FP meets the minimum stack alignment.
1838 
frame %{
  // What direction does stack grow in (assumed to be same for native & Java)
  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  inline_cache_reg(R_Ricklass);          // Inline Cache Register or Method* for I2C
  interpreter_method_oop_reg(R_Rmethod); // Method Oop Register when calling interpreter

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset);

  // Number of stack slots consumed by a Monitor enter
  sync_stack_slots(1 * VMRegImpl::slots_per_word);

  // Compiled code's Frame Pointer
#ifdef AARCH64
  frame_pointer(R_SP);
#else
  frame_pointer(R_R13);
#endif

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes);
  //  LP64: Alignment size in bytes (128-bit -> 16 bytes)
  // !LP64: Alignment size in bytes (64-bit  ->  8 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots.
  // FP + LR
  in_preserve_stack_slots(2 * VMRegImpl::slots_per_word);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  // ADLC doesn't support parsing expressions, so I folded the math by hand.
  varargs_C_out_slots_killed( 0);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  return_addr(STACK - 1*VMRegImpl::slots_per_word +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an OptoRegs array locating
  // arguments either in registers or in stack slots for calling
  // java
  calling_convention %{
    (void) SharedRuntime::java_calling_convention(sig_bt, regs, length, is_outgoing);

  %}

  // Body of function which returns an OptoRegs array locating
  // arguments either in registers or in stack slots for calling
  // C.
  c_calling_convention %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
  %}

  // Location of compiled Java return values.  Same as C
  return_value %{
    return c2::return_value(ideal_reg);
  %}

%}
1910 
1911 //----------ATTRIBUTES---------------------------------------------------------
1912 //----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(DEFAULT_COST); // Required cost attribute
ins_attrib ins_size(32);           // Required size attribute (in bits)
ins_attrib ins_short_branch(0);    // Required flag: is this instruction a
                                   // non-matching short branch variant of some
                                   // long branch?
1918 
1919 //----------OPERANDS-----------------------------------------------------------
1920 // Operand definitions must precede instruction definitions for correct parsing
1921 // in the ADLC because operands constitute user defined types which are used in
1922 // instruction definitions.
1923 
1924 //----------Simple Operands----------------------------------------------------
1925 // Immediate Operands
1926 // Integer Immediate: 32-bit
operand immI() %{
  match(ConI);

  op_cost(0);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 8-bit unsigned [0..255] - for VMOV
operand immU8() %{
  predicate(0 <= n->get_int() && (n->get_int() <= 255));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 16-bit unsigned [0..65535]; requires movw support
operand immI16() %{
  predicate((n->get_int() >> 16) == 0 && VM_Version::supports_movw());
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
1955 
#ifndef AARCH64
// Integer Immediate: offset for half and double word loads and stores
operand immIHD() %{
  predicate(is_memoryHD(n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: offset for fp loads and stores (must be word-aligned)
operand immIFP() %{
  predicate(is_memoryfp(n->get_int()) && ((n->get_int() & 3) == 0));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
#endif
1976 
// Valid scale values for addressing modes and shifts: [0..31]
operand immU5() %{
  predicate(0 <= n->get_int() && (n->get_int() <= 31));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 6-bit values in the range [32..63]
operand immU6Big() %{
  predicate(n->get_int() >= 32 && n->get_int() <= 63);
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 0
operand immI0() %{
  predicate(n->get_int() == 0);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 1
operand immI_1() %{
  predicate(n->get_int() == 1);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 2
operand immI_2() %{
  predicate(n->get_int() == 2);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 3
operand immI_3() %{
  predicate(n->get_int() == 3);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 4
operand immI_4() %{
  predicate(n->get_int() == 4);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 8
operand immI_8() %{
  predicate(n->get_int() == 8);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Int Immediate non-negative
operand immU31()
%{
  predicate(n->get_int() >= 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the values 32-63 (same range as immU6Big)
operand immI_32_63() %{
  predicate(n->get_int() >= 32 && n->get_int() <= 63);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Immediates for special shifts (sign extend)

// Integer Immediate: the value 16
operand immI_16() %{
  predicate(n->get_int() == 16);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 24
operand immI_24() %{
  predicate(n->get_int() == 24);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 255
operand immI_255() %{
  predicate( n->get_int() == 255 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 65535
operand immI_65535() %{
  predicate(n->get_int() == 65535);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
2118 
// Integer Immediates for arithmetic instructions

// Immediate directly encodable in an arithmetic instruction (see is_aimm)
operand aimmI() %{
  predicate(is_aimm(n->get_int()));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Immediate whose negation is encodable (see is_aimm)
operand aimmIneg() %{
  predicate(is_aimm(-n->get_int()));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Non-negative arithmetic immediate
operand aimmU31() %{
  predicate((0 <= n->get_int()) && is_aimm(n->get_int()));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediates for logical instructions

// Immediate directly encodable in a logical instruction (see is_limmI)
operand limmI() %{
  predicate(is_limmI(n->get_int()));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Logical immediate restricted to the low 8 bits (see is_limmI_low)
operand limmIlow8() %{
  predicate(is_limmI_low(n->get_int(), 8));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Non-negative logical immediate
operand limmU31() %{
  predicate(0 <= n->get_int() && is_limmI(n->get_int()));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Immediate whose bitwise complement is encodable (see is_limmI)
operand limmIn() %{
  predicate(is_limmI(~n->get_int()));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
2185 
#ifdef AARCH64
// Long Immediate: for logical instruction
operand limmL() %{
  predicate(is_limmL(n->get_long()));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: bitwise complement is encodable as a logical immediate
operand limmLn() %{
  predicate(is_limmL(~n->get_long()));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: for arithmetic instruction
operand aimmL() %{
  predicate(is_aimm(n->get_long()));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: negation is encodable as an arithmetic immediate
operand aimmLneg() %{
  predicate(is_aimm(-n->get_long()));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
#endif // AARCH64
2225 
// Long Immediate: the value FF
operand immL_FF() %{
  predicate( n->get_long() == 0xFFL );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: the value FFFF
operand immL_FFFF() %{
  predicate( n->get_long() == 0xFFFFL );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 32 or 64-bit
operand immP() %{
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// NULL pointer immediate
operand immP0() %{
  predicate(n->get_ptr() == 0);
  match(ConP);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Address of the safepoint polling page
operand immP_poll() %{
  predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
  match(ConP);

  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate (narrow/compressed oop constant)
operand immN()
%{
  match(ConN);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow klass pointer constant
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate (narrow)
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: any 64-bit constant
operand immL() %{
  match(ConL);
  op_cost(40);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: zero
operand immL0() %{
  predicate(n->get_long() == 0L);
  match(ConL);
  op_cost(0);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: 16-bit unsigned; requires movw support
operand immL16() %{
  predicate(n->get_long() >= 0 && n->get_long() < (1<<16)  && VM_Version::supports_movw());
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits() %{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
2340 
// Double Immediate
operand immD() %{
  match(ConD);

  op_cost(40);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d.
// (bit-pattern compare: -0.0d is excluded since its sign bit is set)
operand immD0() %{
  predicate(jlong_cast(n->getd()) == 0);

  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: encodable as an 8-bit FP immediate
// (see Assembler::double_num(...).can_be_imm8())
operand imm8D() %{
  predicate(Assembler::double_num(n->getd()).can_be_imm8());
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF() %{
  match(ConF);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f
// (bit-pattern compare: -0.0f is excluded since its sign bit is set)
operand immF0() %{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: encoded as 8 bits
operand imm8F() %{
  predicate(Assembler::float_num(n->getf()).can_be_imm8());
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
2397 
// Integer Register Operands
// Integer Register (ZR allowed on AArch64, R12 on 32-bit ARM)
operand iRegI() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegI);
  match(R0RegI);
  match(R1RegI);
  match(R2RegI);
  match(R3RegI);
#ifdef AARCH64
  match(ZRRegI);
#else
  match(R12RegI);
#endif

  format %{ %}
  interface(REG_INTER);
%}

// Pointer Register
operand iRegP() %{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(R0RegP);
  match(R1RegP);
  match(R2RegP);
  match(RExceptionRegP);
  match(R8RegP);
  match(R9RegP);
  match(RthreadRegP); // FIXME: move to sp_ptr_RegP?
  match(R12RegP);
  match(LRRegP);

  match(sp_ptr_RegP);
  match(store_ptr_RegP);

  format %{ %}
  interface(REG_INTER);
%}

// GPRs + Rthread + SP
operand sp_ptr_RegP() %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(RegP);
  match(iRegP);
  match(SPRegP); // FIXME: check cost

  format %{ %}
  interface(REG_INTER);
%}
2448 
#ifdef AARCH64
// Like sp_ptr_reg, but exclude regs (Aarch64 SP) that can't be
// stored directly.  Includes ZR, so can't be used as a destination.
operand store_ptr_RegP() %{
  constraint(ALLOC_IN_RC(store_ptr_reg));
  match(RegP);
  match(iRegP);
  match(ZRRegP);

  format %{ %}
  interface(REG_INTER);
%}

// Int variant of store_ptr_RegP: includes ZR, so source-only
operand store_RegI() %{
  constraint(ALLOC_IN_RC(store_reg));
  match(RegI);
  match(iRegI);
  match(ZRRegI);

  format %{ %}
  interface(REG_INTER);
%}

// Long variant; NOTE(review): reuses the store_ptr_reg class —
// presumably longs and pointers share the 64-bit GPRs here; confirm.
operand store_RegL() %{
  constraint(ALLOC_IN_RC(store_ptr_reg));
  match(RegL);
  match(iRegL);
  match(ZRRegL);

  format %{ %}
  interface(REG_INTER);
%}

// Narrow-oop variant: includes ZR, so source-only
operand store_RegN() %{
  constraint(ALLOC_IN_RC(store_reg));
  match(RegN);
  match(iRegN);
  match(ZRRegN);

  format %{ %}
  interface(REG_INTER);
%}
#endif
2492 
// Fixed single-register operand classes: each pins a value into one
// specific register, as required by calling conventions and encodings.

operand R0RegP() %{
  constraint(ALLOC_IN_RC(R0_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand R1RegP() %{
  constraint(ALLOC_IN_RC(R1_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand R2RegP() %{
  constraint(ALLOC_IN_RC(R2_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

// Pointer pinned to the exception-oop register
operand RExceptionRegP() %{
  constraint(ALLOC_IN_RC(Rexception_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

// Pointer pinned to the thread register
operand RthreadRegP() %{
  constraint(ALLOC_IN_RC(Rthread_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

// Pointer pinned to the IP scratch register
operand IPRegP() %{
  constraint(ALLOC_IN_RC(IP_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

// Pointer pinned to the link register
operand LRRegP() %{
  constraint(ALLOC_IN_RC(LR_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand R0RegI() %{
  constraint(ALLOC_IN_RC(R0_regI));
  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}

operand R1RegI() %{
  constraint(ALLOC_IN_RC(R1_regI));
  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}

operand R2RegI() %{
  constraint(ALLOC_IN_RC(R2_regI));
  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}

operand R3RegI() %{
  constraint(ALLOC_IN_RC(R3_regI));
  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}

#ifndef AARCH64
operand R12RegI() %{
  constraint(ALLOC_IN_RC(R12_regI));
  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}
#endif
2590 
// Long Register
operand iRegL() %{
  constraint(ALLOC_IN_RC(long_reg));
  match(RegL);
#ifdef AARCH64
  match(iRegLd);
#else
  match(R0R1RegL);
  match(R2R3RegL);
#endif
//match(iRegLex);

  format %{ %}
  interface(REG_INTER);
%}

// Long register constrained to the aligned register class (long_reg_align)
operand iRegLd() %{
  constraint(ALLOC_IN_RC(long_reg_align));
  match(iRegL); // FIXME: allows unaligned R11/R12?

  format %{ %}
  interface(REG_INTER);
%}

#ifndef AARCH64
// first long arg, or return value
operand R0R1RegL() %{
  constraint(ALLOC_IN_RC(R0R1_regL));
  match(iRegL);

  format %{ %}
  interface(REG_INTER);
%}

// second long arg register pair
operand R2R3RegL() %{
  constraint(ALLOC_IN_RC(R2R3_regL));
  match(iRegL);

  format %{ %}
  interface(REG_INTER);
%}
#endif
2633 
// Condition Code Flag Register
operand flagsReg() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "apsr" %}
  interface(REG_INTER);
%}

// Result of compare to 0 (TST); per the name, only the EQ/NE/LT/GE
// conditions are meaningful on this flags value.
operand flagsReg_EQNELTGE() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "apsr_EQNELTGE" %}
  interface(REG_INTER);
%}

// Condition Code Register, unsigned comparisons.
operand flagsRegU() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);
#ifdef TODO
  // disabled code ('TODO' is presumably never defined)
  match(RegFlagsP);
#endif

  format %{ "apsr_U" %}
  interface(REG_INTER);
%}

// Condition Code Register, pointer comparisons.
operand flagsRegP() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "apsr_P" %}
  interface(REG_INTER);
%}

// Condition Code Register, long comparisons.
// Each variant names the conditions that are valid after the comparison.
#ifndef AARCH64
operand flagsRegL_LTGE() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "apsr_L_LTGE" %}
  interface(REG_INTER);
%}

operand flagsRegL_EQNE() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "apsr_L_EQNE" %}
  interface(REG_INTER);
%}

operand flagsRegL_LEGT() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "apsr_L_LEGT" %}
  interface(REG_INTER);
%}

operand flagsRegUL_LTGE() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "apsr_UL_LTGE" %}
  interface(REG_INTER);
%}

operand flagsRegUL_EQNE() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "apsr_UL_EQNE" %}
  interface(REG_INTER);
%}

operand flagsRegUL_LEGT() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "apsr_UL_LEGT" %}
  interface(REG_INTER);
%}
#endif

// Condition Code Register, floating comparisons, unordered same as "less".
operand flagsRegF() %{
  constraint(ALLOC_IN_RC(float_flags));
  match(RegFlags);

  format %{ "fpscr_F" %}
  interface(REG_INTER);
%}
2732 
// Vectors
// VecD (64-bit) vector; shares the actual_dflt_reg class with regD
operand vecD() %{
  constraint(ALLOC_IN_RC(actual_dflt_reg));
  match(VecD);

  format %{ %}
  interface(REG_INTER);
%}

// VecX (128-bit) vector
operand vecX() %{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  format %{ %}
  interface(REG_INTER);
%}

// Double-precision float register
operand regD() %{
  constraint(ALLOC_IN_RC(actual_dflt_reg));
  match(RegD);
  match(regD_low);

  format %{ %}
  interface(REG_INTER);
%}

// Single-precision float register
operand regF() %{
  constraint(ALLOC_IN_RC(sflt_reg));
  match(RegF);

  format %{ %}
  interface(REG_INTER);
%}

// Double register from the dflt_low_reg class (presumably the low half
// of the D register file — confirm against the register definitions)
operand regD_low() %{
  constraint(ALLOC_IN_RC(dflt_low_reg));
  match(RegD);

  format %{ %}
  interface(REG_INTER);
%}
2774 
// Special Registers

// Method Register: Ricklass carries the inline-cache value / Method* for I2C
// (see inline_cache_reg(R_Ricklass) in the frame section)
operand inline_cache_regP(iRegP reg) %{
  constraint(ALLOC_IN_RC(Ricklass_regP));
  match(reg);
  format %{ %}
  interface(REG_INTER);
%}

// Rmethod carries the Method* when calling the interpreter
// (see interpreter_method_oop_reg(R_Rmethod) in the frame section)
operand interpreter_method_oop_regP(iRegP reg) %{
  constraint(ALLOC_IN_RC(Rmethod_regP));
  match(reg);
  format %{ %}
  interface(REG_INTER);
%}
2791 
2792 
//----------Complex Operands---------------------------------------------------
// Indirect Memory Reference: [reg] with no index, scale or displacement
operand indirect(sp_ptr_RegP reg) %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(reg);

  op_cost(100);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
#ifdef AARCH64
    index(0xff); // 0xff => no index
#else
    index(0xf); // PC => no index
#endif
    scale(0x0);
    disp(0x0);
  %}
%}
2812 
#ifdef AARCH64
// [reg + uimm12 offset] operands, one per access size; the offset operand
// type enforces the scaling (x1/x2/x4/x8/x16) that matches the access size.
// NOTE(review): the inner "#ifdef AARCH64" in each interface below is
// redundant — this whole section is already AARCH64-only, so the #else
// arms are never compiled.

// Indirect with scaled*1 uimm12 offset
operand indOffsetU12ScaleB(sp_ptr_RegP reg, immUL12 offset) %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(AddP reg offset);

  op_cost(100);
  format %{ "[$reg + $offset]" %}
  interface(MEMORY_INTER) %{
    base($reg);
#ifdef AARCH64
    index(0xff); // 0xff => no index
#else
    index(0xf); // PC => no index
#endif
    scale(0x0);
    disp($offset);
  %}
%}

// Indirect with scaled*2 uimm12 offset
operand indOffsetU12ScaleS(sp_ptr_RegP reg, immUL12x2 offset) %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(AddP reg offset);

  op_cost(100);
  format %{ "[$reg + $offset]" %}
  interface(MEMORY_INTER) %{
    base($reg);
#ifdef AARCH64
    index(0xff); // 0xff => no index
#else
    index(0xf); // PC => no index
#endif
    scale(0x0);
    disp($offset);
  %}
%}

// Indirect with scaled*4 uimm12 offset
operand indOffsetU12ScaleI(sp_ptr_RegP reg, immUL12x4 offset) %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(AddP reg offset);

  op_cost(100);
  format %{ "[$reg + $offset]" %}
  interface(MEMORY_INTER) %{
    base($reg);
#ifdef AARCH64
    index(0xff); // 0xff => no index
#else
    index(0xf); // PC => no index
#endif
    scale(0x0);
    disp($offset);
  %}
%}

// Indirect with scaled*8 uimm12 offset
operand indOffsetU12ScaleL(sp_ptr_RegP reg, immUL12x8 offset) %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(AddP reg offset);

  op_cost(100);
  format %{ "[$reg + $offset]" %}
  interface(MEMORY_INTER) %{
    base($reg);
#ifdef AARCH64
    index(0xff); // 0xff => no index
#else
    index(0xf); // PC => no index
#endif
    scale(0x0);
    disp($offset);
  %}
%}

// Indirect with scaled*16 uimm12 offset
operand indOffsetU12ScaleQ(sp_ptr_RegP reg, immUL12x16 offset) %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(AddP reg offset);

  op_cost(100);
  format %{ "[$reg + $offset]" %}
  interface(MEMORY_INTER) %{
    base($reg);
#ifdef AARCH64
    index(0xff); // 0xff => no index
#else
    index(0xf); // PC => no index
#endif
    scale(0x0);
    disp($offset);
  %}
%}
2908 
2909 #else // ! AARCH64
2910 
// Indirect [reg + offset] operands for the 32-bit ARM build, one per
// addressing-mode immediate range (word, FP, half/double-word, paired).
// This entire section sits inside the #else // !AARCH64 branch of the
// enclosing conditional, so the nested #ifdef AARCH64 branch that
// previously surrounded each index() was dead code; only the ARM32
// sentinel (index(0xf), i.e. PC => no index) is ever compiled here, and
// it is now stated unconditionally.

// Indirect with Offset in ]-4096, 4096[
operand indOffset12(sp_ptr_RegP reg, immI12 offset) %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(AddP reg offset);

  op_cost(100);
  format %{ "[$reg + $offset]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xf); // PC => no index
    scale(0x0);
    disp($offset);
  %}
%}

// Indirect with offset for float load/store
operand indOffsetFP(sp_ptr_RegP reg, immIFP offset) %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(AddP reg offset);

  op_cost(100);
  format %{ "[$reg + $offset]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xf); // PC => no index
    scale(0x0);
    disp($offset);
  %}
%}

// Indirect with Offset for half and double words
operand indOffsetHD(sp_ptr_RegP reg, immIHD offset) %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(AddP reg offset);

  op_cost(100);
  format %{ "[$reg + $offset]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xf); // PC => no index
    scale(0x0);
    disp($offset);
  %}
%}

// Indirect with Offset and Offset+4 in ]-1024, 1024[
operand indOffsetFPx2(sp_ptr_RegP reg, immX10x2 offset) %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(AddP reg offset);

  op_cost(100);
  format %{ "[$reg + $offset]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xf); // PC => no index
    scale(0x0);
    disp($offset);
  %}
%}

// Indirect with Offset and Offset+4 in ]-4096, 4096[
operand indOffset12x2(sp_ptr_RegP reg, immI12x2 offset) %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(AddP reg offset);

  op_cost(100);
  format %{ "[$reg + $offset]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xf); // PC => no index
    scale(0x0);
    disp($offset);
  %}
%}
3005 #endif // !AARCH64
3006 
// Indirect with Register Index: [addr + index], no scale, no displacement.
// Shared by both architecture variants.
operand indIndex(iRegP addr, iRegX index) %{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP addr index);

  op_cost(100);
  format %{ "[$addr + $index]" %}
  interface(MEMORY_INTER) %{
    base($addr);
    index($index);
    scale(0x0);
    disp(0x0);
  %}
%}
3021 
3022 #ifdef AARCH64
// AArch64 scaled-index addressing operands: [addr + index << scale] with a
// fixed shift per access size (S=1, I=2, L=3, Q=4). Each size comes in two
// flavors: a 64-bit index (indIndexScale*) and a 32-bit index that is
// sign-extended via sxtw (indIndexIScale*, matching ConvI2L of the index).
// NOTE(review): disp(0x7fffffff) cannot be a real displacement here; it
// appears to be a sentinel telling the encoder to emit the sxtw extend —
// confirm against the memory-operand encoding code before changing.

// Indirect Memory Times Scale Plus Index Register
operand indIndexScaleS(iRegP addr, iRegX index, immI_1 scale) %{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP addr (LShiftX index scale));

  op_cost(100);
  format %{"[$addr + $index << $scale]" %}
  interface(MEMORY_INTER) %{
    base($addr);
    index($index);
    scale($scale);
    disp(0x0);
  %}
%}

// Indirect Memory Times Scale Plus 32-bit Index Register
operand indIndexIScaleS(iRegP addr, iRegI index, immI_1 scale) %{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP addr (LShiftX (ConvI2L index) scale));

  op_cost(100);
  format %{"[$addr + $index.w << $scale]" %}
  interface(MEMORY_INTER) %{
    base($addr);
    index($index);
    scale($scale);
    disp(0x7fffffff); // sxtw
  %}
%}

// Indirect Memory Times Scale Plus Index Register
operand indIndexScaleI(iRegP addr, iRegX index, immI_2 scale) %{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP addr (LShiftX index scale));

  op_cost(100);
  format %{"[$addr + $index << $scale]" %}
  interface(MEMORY_INTER) %{
    base($addr);
    index($index);
    scale($scale);
    disp(0x0);
  %}
%}

// Indirect Memory Times Scale Plus 32-bit Index Register
operand indIndexIScaleI(iRegP addr, iRegI index, immI_2 scale) %{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP addr (LShiftX (ConvI2L index) scale));

  op_cost(100);
  format %{"[$addr + $index.w << $scale]" %}
  interface(MEMORY_INTER) %{
    base($addr);
    index($index);
    scale($scale);
    disp(0x7fffffff); // sxtw
  %}
%}

// Indirect Memory Times Scale Plus Index Register
operand indIndexScaleL(iRegP addr, iRegX index, immI_3 scale) %{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP addr (LShiftX index scale));

  op_cost(100);
  format %{"[$addr + $index << $scale]" %}
  interface(MEMORY_INTER) %{
    base($addr);
    index($index);
    scale($scale);
    disp(0x0);
  %}
%}

// Indirect Memory Times Scale Plus 32-bit Index Register
operand indIndexIScaleL(iRegP addr, iRegI index, immI_3 scale) %{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP addr (LShiftX (ConvI2L index) scale));

  op_cost(100);
  format %{"[$addr + $index.w << $scale]" %}
  interface(MEMORY_INTER) %{
    base($addr);
    index($index);
    scale($scale);
    disp(0x7fffffff); // sxtw
  %}
%}

// Indirect Memory Times Scale Plus Index Register
operand indIndexScaleQ(iRegP addr, iRegX index, immI_4 scale) %{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP addr (LShiftX index scale));

  op_cost(100);
  format %{"[$addr + $index << $scale]" %}
  interface(MEMORY_INTER) %{
    base($addr);
    index($index);
    scale($scale);
    disp(0x0);
  %}
%}

// Indirect Memory Times Scale Plus 32-bit Index Register
operand indIndexIScaleQ(iRegP addr, iRegI index, immI_4 scale) %{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP addr (LShiftX (ConvI2L index) scale));

  op_cost(100);
  format %{"[$addr + $index.w << $scale]" %}
  interface(MEMORY_INTER) %{
    base($addr);
    index($index);
    scale($scale);
    disp(0x7fffffff); // sxtw
  %}
%}
3142 #else
// Indirect Memory Times Scale Plus Index Register
// ARM32 variant: a single operand covering any shift amount 0..31 (immU5),
// unlike the AArch64 per-size operands above.
operand indIndexScale(iRegP addr, iRegX index, immU5 scale) %{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP addr (LShiftX index scale));

  op_cost(100);
  format %{"[$addr + $index << $scale]" %}
  interface(MEMORY_INTER) %{
    base($addr);
    index($index);
    scale($scale);
    disp(0x0);
  %}
%}
3157 #endif
3158 
3159 // Operands for expressing Control Flow
3160 // NOTE:  Label is a predefined operand which should not be redefined in
3161 //        the AD file.  It is generically handled within the ADLC.
3162 
3163 //----------Conditional Branch Operands----------------------------------------
3164 // Comparison Op  - This is the operation of the comparison, and is limited to
3165 //                  the following set of codes:
3166 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
3167 //
3168 // Other attributes of the comparison, such as unsignedness, are specified
3169 // by the comparison instruction that sets a condition code flags register.
3170 // That result is represented by a flags operand whose subtype is appropriate
3171 // to the unsignedness (etc.) of the comparison.
3172 //
3173 // Later, the instruction which matches both the Comparison Op (a Bool) and
3174 // the flags (produced by the Cmp) specifies the coding of the comparison op
3175 // by matching a specific subtype of Bool operand below, such as cmpOpU.
3176 
// Signed integer comparison op. The values are ARM condition-code
// nibbles: EQ=0x0, NE=0x1, LT=0xb, GE=0xa, LE=0xd, GT=0xc.
operand cmpOp() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0);
    not_equal(0x1);
    less(0xb);
    greater_equal(0xa);
    less_equal(0xd);
    greater(0xc);
    overflow(0x0); // unsupported/unimplemented
    no_overflow(0x0); // unsupported/unimplemented
  %}
%}
3192 
// integer comparison with 0, signed
// less/greater_equal test the sign flag directly (MI=0x4, PL=0x5);
// le/gt keep the generic encodings but are explicitly unsupported here.
operand cmpOp0() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0);
    not_equal(0x1);
    less(0x4);
    greater_equal(0x5);
    less_equal(0xd); // unsupported
    greater(0xc); // unsupported
    overflow(0x0); // unsupported/unimplemented
    no_overflow(0x0); // unsupported/unimplemented
  %}
%}
3209 
// Comparison Op, unsigned
// Unsigned condition codes: LO=0x3, HS=0x2, LS=0x9, HI=0x8.
operand cmpOpU() %{
  match(Bool);

  format %{ "u" %}
  interface(COND_INTER) %{
    equal(0x0);
    not_equal(0x1);
    less(0x3);
    greater_equal(0x2);
    less_equal(0x9);
    greater(0x8);
    overflow(0x0); // unsupported/unimplemented
    no_overflow(0x0); // unsupported/unimplemented
  %}
%}
3226 
// Comparison Op, pointer (same as unsigned)
// Identical encodings to cmpOpU; kept separate so pointer compares
// format as "p" instead of "u".
operand cmpOpP() %{
  match(Bool);

  format %{ "p" %}
  interface(COND_INTER) %{
    equal(0x0);
    not_equal(0x1);
    less(0x3);
    greater_equal(0x2);
    less_equal(0x9);
    greater(0x8);
    overflow(0x0); // unsupported/unimplemented
    no_overflow(0x0); // unsupported/unimplemented
  %}
%}
3243 
// Signed long comparison op; same condition-code encodings as cmpOp,
// formatted with an "L" marker.
operand cmpOpL() %{
  match(Bool);

  format %{ "L" %}
  interface(COND_INTER) %{
    equal(0x0);
    not_equal(0x1);
    less(0xb);
    greater_equal(0xa);
    less_equal(0xd);
    greater(0xc);
    overflow(0x0); // unsupported/unimplemented
    no_overflow(0x0); // unsupported/unimplemented
  %}
%}
3259 
// cmpOpL with the comparison operands swapped: each condition maps to
// its mirror (less<->greater 0xc/0xb, le<->ge 0xa/0xd); eq/ne unchanged.
operand cmpOpL_commute() %{
  match(Bool);

  format %{ "L" %}
  interface(COND_INTER) %{
    equal(0x0);
    not_equal(0x1);
    less(0xc);
    greater_equal(0xd);
    less_equal(0xa);
    greater(0xb);
    overflow(0x0); // unsupported/unimplemented
    no_overflow(0x0); // unsupported/unimplemented
  %}
%}
3275 
// Unsigned long comparison op; same encodings as cmpOpU (LO/HS/LS/HI),
// formatted with a "UL" marker.
operand cmpOpUL() %{
  match(Bool);

  format %{ "UL" %}
  interface(COND_INTER) %{
    equal(0x0);
    not_equal(0x1);
    less(0x3);
    greater_equal(0x2);
    less_equal(0x9);
    greater(0x8);
    overflow(0x0); // unsupported/unimplemented
    no_overflow(0x0); // unsupported/unimplemented
  %}
%}
3291 
// cmpOpUL with the comparison operands swapped: each unsigned condition
// maps to its mirror (LO<->HI 0x8/0x3, LS<->HS 0x2/0x9); eq/ne unchanged.
operand cmpOpUL_commute() %{
  match(Bool);

  format %{ "UL" %}
  interface(COND_INTER) %{
    equal(0x0);
    not_equal(0x1);
    less(0x8);
    greater_equal(0x9);
    less_equal(0x2);
    greater(0x3);
    overflow(0x0); // unsupported/unimplemented
    no_overflow(0x0); // unsupported/unimplemented
  %}
%}
3307 
3308 
3309 //----------OPERAND CLASSES----------------------------------------------------
3310 // Operand Classes are groups of operands that are used to simplify
3311 // instruction definitions by not requiring the AD writer to specify separate
3312 // instructions for every form of operand when the instruction accepts
3313 // multiple operand types with the same basic encoding and format.  The classic
3314 // case of this is memory operands.
3315 #ifdef AARCH64
// AArch64 memory operand classes, grouped by access size. Note that the
// pointer (memoryP) and double (memoryD) classes reuse the 8-byte (L)
// scaled/offset operands, and memoryF reuses the 4-byte (I) ones.
opclass memoryB(indirect, indIndex, indOffsetU12ScaleB);
opclass memoryS(indirect, indIndex, indIndexScaleS, indIndexIScaleS, indOffsetU12ScaleS);
opclass memoryI(indirect, indIndex, indIndexScaleI, indIndexIScaleI, indOffsetU12ScaleI);
opclass memoryL(indirect, indIndex, indIndexScaleL, indIndexIScaleL, indOffsetU12ScaleL);
opclass memoryP(indirect, indIndex, indIndexScaleL, indIndexIScaleL, indOffsetU12ScaleL);
opclass memoryQ(indirect, indIndex, indIndexScaleQ, indIndexIScaleQ, indOffsetU12ScaleQ);
opclass memoryF(indirect, indIndex, indIndexScaleI, indIndexIScaleI, indOffsetU12ScaleI);
opclass memoryD(indirect, indIndex, indIndexScaleL, indIndexIScaleL, indOffsetU12ScaleL);

// Scaled-index-only classes, used where a plain [base] form is not wanted.
opclass memoryScaledS(indIndexScaleS, indIndexIScaleS);
opclass memoryScaledI(indIndexScaleI, indIndexIScaleI);
opclass memoryScaledL(indIndexScaleL, indIndexIScaleL);
opclass memoryScaledP(indIndexScaleL, indIndexIScaleL);
opclass memoryScaledQ(indIndexScaleQ, indIndexIScaleQ);
opclass memoryScaledF(indIndexScaleI, indIndexIScaleI);
opclass memoryScaledD(indIndexScaleL, indIndexIScaleL);
// when ldrex/strex is used:
opclass memoryex ( indirect );
opclass indIndexMemory( indIndex );
opclass memoryvld ( indirect /* , write back mode not implemented */ );
3336 
3337 #else
3338 
// ARM32 memory operand classes. Each class pairs the addressing forms
// that the corresponding load/store encoding can accept (e.g. FP
// loads/stores take only base or base+FP-range offset).
opclass memoryI ( indirect, indOffset12, indIndex, indIndexScale );
opclass memoryP ( indirect, indOffset12, indIndex, indIndexScale );
opclass memoryF ( indirect, indOffsetFP );
opclass memoryF2 ( indirect, indOffsetFPx2 );
opclass memoryD ( indirect, indOffsetFP );
opclass memoryfp( indirect, indOffsetFP );
opclass memoryB ( indirect, indIndex, indOffsetHD );
opclass memoryS ( indirect, indIndex, indOffsetHD );
opclass memoryL ( indirect, indIndex, indOffsetHD );

opclass memoryScaledI(indIndexScale);
opclass memoryScaledP(indIndexScale);

// when ldrex/strex is used:
opclass memoryex ( indirect );
opclass indIndexMemory( indIndex );
opclass memorylong ( indirect, indOffset12x2 );
opclass memoryvld ( indirect /* , write back mode not implemented */ );
3357 #endif
3358 
3359 //----------PIPELINE-----------------------------------------------------------
3360 pipeline %{
3361 
3362 //----------ATTRIBUTES---------------------------------------------------------
// Global pipeline attributes for the scheduler.
attributes %{
  fixed_size_instructions;           // Fixed size instructions
  max_instructions_per_bundle = 4;   // Up to 4 instructions per bundle
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 16;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 16 bytes

  // List of nop instructions
  nops( Nop_A0, Nop_A1, Nop_MS, Nop_FA, Nop_BR );
%}
3373 
3374 //----------RESOURCES----------------------------------------------------------
3375 // Resources are the functional units available to the machine
// Functional units; IALU is a pseudo-resource satisfied by either ALU (A0|A1).
resources(A0, A1, MS, BR, FA, FM, IDIV, FDIV, IALU = A0 | A1);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// NOTE(review): stage letters are model-specific; the pipe_classes below
// mainly use R (read), E/C/M/W/X (result stages) — confirm mapping if edited.
pipe_desc(A, P, F, B, I, J, S, R, E, C, M, W, X, T, D);
3382 
3383 //----------PIPELINE CLASSES---------------------------------------------------
3384 // Pipeline Classes describe the stages in which input and output are
3385 // referenced by the hardware pipeline.
3386 
// Integer ALU reg-reg operation
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
    single_instruction;
    dst   : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU reg-reg long operation
pipe_class ialu_reg_reg_2(iRegL dst, iRegL src1, iRegL src2) %{
    instruction_count(2);
    dst   : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
    IALU  : R;
%}

// Integer ALU reg-reg long dependent operation
pipe_class ialu_reg_reg_2_dep(iRegL dst, iRegL src1, iRegL src2, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst   : E(write);
    src1  : R(read);
    src2  : R(read);
    cr    : E(write);
    IALU  : R(2);
%}

// Integer ALU reg-imm operation
pipe_class ialu_reg_imm(iRegI dst, iRegI src1) %{
    single_instruction;
    dst   : E(write);
    src1  : R(read);
    IALU  : R;
%}

// Integer ALU reg-reg operation with condition code
pipe_class ialu_cc_reg_reg(iRegI dst, iRegI src1, iRegI src2, flagsReg cr) %{
    single_instruction;
    dst   : E(write);
    cr    : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU zero-reg operation
pipe_class ialu_zero_reg(iRegI dst, immI0 zero, iRegI src2) %{
    single_instruction;
    dst   : E(write);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU zero-reg operation with condition code only
pipe_class ialu_cconly_zero_reg(flagsReg cr, iRegI src) %{
    single_instruction;
    cr    : E(write);
    src   : R(read);
    IALU  : R;
%}

// Integer ALU reg-reg operation with condition code only
pipe_class ialu_cconly_reg_reg(flagsReg cr, iRegI src1, iRegI src2) %{
    single_instruction;
    cr    : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU reg-imm operation with condition code only
pipe_class ialu_cconly_reg_imm(flagsReg cr, iRegI src1) %{
    single_instruction;
    cr    : E(write);
    src1  : R(read);
    IALU  : R;
%}

// Integer ALU reg-reg-zero operation with condition code only
pipe_class ialu_cconly_reg_reg_zero(flagsReg cr, iRegI src1, iRegI src2, immI0 zero) %{
    single_instruction;
    cr    : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU reg-imm-zero operation with condition code only
pipe_class ialu_cconly_reg_imm_zero(flagsReg cr, iRegI src1, immI0 zero) %{
    single_instruction;
    cr    : E(write);
    src1  : R(read);
    IALU  : R;
%}

// Integer ALU reg-reg operation with condition code, src1 modified
pipe_class ialu_cc_rwreg_reg(flagsReg cr, iRegI src1, iRegI src2) %{
    single_instruction;
    cr    : E(write);
    src1  : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
%}

// Long compare: result available late (E+4), uses both ALU and branch units.
pipe_class cmpL_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg cr ) %{
    multiple_bundles;
    dst   : E(write)+4;
    cr    : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R(3);
    BR    : R(2);
%}

// Integer ALU operation
pipe_class ialu_none(iRegI dst) %{
    single_instruction;
    dst   : E(write);
    IALU  : R;
%}
3510 
// Integer ALU reg operation
pipe_class ialu_reg(iRegI dst, iRegI src) %{
    single_instruction; may_have_no_code;
    dst   : E(write);
    src   : R(read);
    IALU  : R;
%}

// Integer ALU reg conditional operation
// This instruction has a 1 cycle stall, and cannot execute
// in the same cycle as the instruction setting the condition
// code. We kludge this by pretending to read the condition code
// 1 cycle earlier, and by marking the functional units as busy
// for 2 cycles with the result available 1 cycle later than
// is really the case.
pipe_class ialu_reg_flags( iRegI op2_out, iRegI op2_in, iRegI op1, flagsReg cr ) %{
    single_instruction;
    op2_out : C(write);
    op1     : R(read);
    cr      : R(read);       // This is really E, with a 1 cycle stall
    BR      : R(2);
    MS      : R(2);
%}

// Integer ALU reg operation (long-to-int move; may be a no-op)
pipe_class ialu_move_reg_L_to_I(iRegI dst, iRegL src) %{
    single_instruction; may_have_no_code;
    dst   : E(write);
    src   : R(read);
    IALU  : R;
%}
// Int-to-long move counterpart of the class above.
pipe_class ialu_move_reg_I_to_L(iRegL dst, iRegI src) %{
    single_instruction; may_have_no_code;
    dst   : E(write);
    src   : R(read);
    IALU  : R;
%}
3548 
// Two integer ALU reg operations
pipe_class ialu_reg_2(iRegL dst, iRegL src) %{
    instruction_count(2);
    dst   : E(write);
    src   : R(read);
    A0    : R;
    A1    : R;
%}

// Two integer ALU reg operations (long move; may be a no-op)
pipe_class ialu_move_reg_L_to_L(iRegL dst, iRegL src) %{
    instruction_count(2); may_have_no_code;
    dst   : E(write);
    src   : R(read);
    A0    : R;
    A1    : R;
%}

// Integer ALU imm operation
pipe_class ialu_imm(iRegI dst) %{
    single_instruction;
    dst   : E(write);
    IALU  : R;
%}

// Integer ALU negated-imm operation; timing identical to ialu_imm.
pipe_class ialu_imm_n(iRegI dst) %{
    single_instruction;
    dst   : E(write);
    IALU  : R;
%}
3579 
// Integer ALU reg-reg with carry operation
pipe_class ialu_reg_reg_cy(iRegI dst, iRegI src1, iRegI src2, iRegI cy) %{
    single_instruction;
    dst   : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU cc operation
pipe_class ialu_cc(iRegI dst, flagsReg cc) %{
    single_instruction;
    dst   : E(write);
    cc    : R(read);
    IALU  : R;
%}

// Integer ALU cc / second IALU operation
pipe_class ialu_reg_ialu( iRegI dst, iRegI src ) %{
    instruction_count(1); multiple_bundles;
    dst   : E(write)+1;
    src   : R(read);
    IALU  : R;
%}

// Integer ALU cc / second IALU operation
pipe_class ialu_reg_reg_ialu( iRegI dst, iRegI p, iRegI q ) %{
    instruction_count(1); multiple_bundles;
    dst   : E(write)+1;
    p     : R(read);
    q     : R(read);
    IALU  : R;
%}

// Integer ALU hi-lo-reg operation (two-part immediate materialization)
pipe_class ialu_hi_lo_reg(iRegI dst, immI src) %{
    instruction_count(1); multiple_bundles;
    dst   : E(write)+1;
    IALU  : R(2);
%}

// Long Constant
pipe_class loadConL( iRegL dst, immL src ) %{
    instruction_count(2); multiple_bundles;
    dst   : E(write)+1;
    IALU  : R(2);
    IALU  : R(2);
%}
3628 
// Pointer Constant
pipe_class loadConP( iRegP dst, immP src ) %{
    instruction_count(0); multiple_bundles;
    fixed_latency(6);
%}

// Polling Address
pipe_class loadConP_poll( iRegP dst, immP_poll src ) %{
    dst   : E(write);
    IALU  : R;
%}

// Long Constant small
pipe_class loadConLlo( iRegL dst, immL src ) %{
    instruction_count(2);
    dst   : E(write);
    IALU  : R;
    IALU  : R;
%}

// [PHH] This is wrong for 64-bit.  See LdImmF/D.
pipe_class loadConFD(regF dst, immF src, iRegP tmp) %{
    instruction_count(1); multiple_bundles;
    src   : R(read);
    dst   : M(write)+1;
    IALU  : R;
    MS    : E;
%}

// Integer ALU nop operation
pipe_class ialu_nop() %{
    single_instruction;
    IALU  : R;
%}
3663 
// Integer ALU nop operation (pinned to unit A0)
pipe_class ialu_nop_A0() %{
    single_instruction;
    A0    : R;
%}

// Integer ALU nop operation (pinned to unit A1)
pipe_class ialu_nop_A1() %{
    single_instruction;
    A1    : R;
%}

// Integer Multiply reg-reg operation
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
    single_instruction;
    dst   : E(write);
    src1  : R(read);
    src2  : R(read);
    MS    : R(5);
%}

// Long multiply: result available late (E+4), MS unit busy 6 cycles.
pipe_class mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
    single_instruction;
    dst   : E(write)+4;
    src1  : R(read);
    src2  : R(read);
    MS    : R(6);
%}

// Integer Divide reg-reg
pipe_class sdiv_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI temp, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst   : E(write);
    temp  : E(write);
    src1  : R(read);
    src2  : R(read);
    temp  : R(read);
    MS    : R(38);
%}

// Long Divide
pipe_class divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
    dst  : E(write)+71;
    src1 : R(read);
    src2 : R(read)+1;
    MS   : R(70);
%}
3711 
// Floating Point Add Float
pipe_class faddF_reg_reg(regF dst, regF src1, regF src2) %{
    single_instruction;
    dst   : X(write);
    src1  : E(read);
    src2  : E(read);
    FA    : R;
%}

// Floating Point Add Double
pipe_class faddD_reg_reg(regD dst, regD src1, regD src2) %{
    single_instruction;
    dst   : X(write);
    src1  : E(read);
    src2  : E(read);
    FA    : R;
%}

// Floating Point Conditional Move based on integer flags
pipe_class int_conditional_float_move (cmpOp cmp, flagsReg cr, regF dst, regF src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    cr    : R(read);
    FA    : R(2);
    BR    : R(2);
%}

// Floating Point Conditional Move based on integer flags
pipe_class int_conditional_double_move (cmpOp cmp, flagsReg cr, regD dst, regD src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    cr    : R(read);
    FA    : R(2);
    BR    : R(2);
%}

// Floating Point Multiply Float
pipe_class fmulF_reg_reg(regF dst, regF src1, regF src2) %{
    single_instruction;
    dst   : X(write);
    src1  : E(read);
    src2  : E(read);
    FM    : R;
%}

// Floating Point Multiply Double
pipe_class fmulD_reg_reg(regD dst, regD src1, regD src2) %{
    single_instruction;
    dst   : X(write);
    src1  : E(read);
    src2  : E(read);
    FM    : R;
%}

// Floating Point Divide Float (FDIV unit busy 14 cycles)
pipe_class fdivF_reg_reg(regF dst, regF src1, regF src2) %{
    single_instruction;
    dst   : X(write);
    src1  : E(read);
    src2  : E(read);
    FM    : R;
    FDIV  : C(14);
%}

// Floating Point Divide Double (FDIV unit busy 17 cycles)
pipe_class fdivD_reg_reg(regD dst, regD src1, regD src2) %{
    single_instruction;
    dst   : X(write);
    src1  : E(read);
    src2  : E(read);
    FM    : R;
    FDIV  : C(17);
%}
3787 
// Floating Point Move/Negate/Abs Float
pipe_class faddF_reg(regF dst, regF src) %{
    single_instruction;
    dst   : W(write);
    src   : E(read);
    FA    : R(1);
%}

// Floating Point Move/Negate/Abs Double
pipe_class faddD_reg(regD dst, regD src) %{
    single_instruction;
    dst   : W(write);
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert F->D
pipe_class fcvtF2D(regD dst, regF src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert I->D
// NOTE(review): src is declared regF in several of these convert classes;
// for scheduling purposes only the stage/unit usage matters.
pipe_class fcvtI2D(regD dst, regF src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert LHi->D
pipe_class fcvtLHi2D(regD dst, regD src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert L->D
pipe_class fcvtL2D(regD dst, iRegL src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert L->F
pipe_class fcvtL2F(regF dst, iRegL src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert D->F
pipe_class fcvtD2F(regD dst, regF src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert I->L
pipe_class fcvtI2L(regD dst, regF src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    FA    : R;
%}
3859 
// Floating Point Convert D->I
// (header comment previously said "D->F"; the class converts double to int)
pipe_class fcvtD2I(iRegI dst, regD src, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst   : X(write)+6;
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert D->L
pipe_class fcvtD2L(regD dst, regD src, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst   : X(write)+6;
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert F->I
pipe_class fcvtF2I(regF dst, regF src, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst   : X(write)+6;
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert F->L
pipe_class fcvtF2L(regD dst, regF src, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst   : X(write)+6;
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert I->F
pipe_class fcvtI2F(regF dst, regF src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    FA    : R;
%}

// Floating Point Compare (float)
pipe_class faddF_fcc_reg_reg_zero(flagsRegF cr, regF src1, regF src2, immI0 zero) %{
    single_instruction;
    cr    : X(write);
    src1  : E(read);
    src2  : E(read);
    FA    : R;
%}

// Floating Point Compare (double)
pipe_class faddD_fcc_reg_reg_zero(flagsRegF cr, regD src1, regD src2, immI0 zero) %{
    single_instruction;
    cr    : X(write);
    src1  : E(read);
    src2  : E(read);
    FA    : R;
%}

// Floating Add Nop
pipe_class fadd_nop() %{
    single_instruction;
    FA  : R;
%}
3923 
// Integer Store to Memory
// (address read at stage R, data read at stage C, MS unit busy for R)
pipe_class istore_mem_reg(memoryI mem, iRegI src) %{
    single_instruction;
    mem   : R(read);
    src   : C(read);
    MS    : R;
%}

// Integer Store to Memory (stack-pointer or general pointer-register source)
pipe_class istore_mem_spORreg(memoryI mem, sp_ptr_RegP src) %{
    single_instruction;
    mem   : R(read);
    src   : C(read);
    MS    : R;
%}

// Float Store
pipe_class fstoreF_mem_reg(memoryF mem, RegF src) %{
    single_instruction;
    mem : R(read);
    src : C(read);
    MS  : R;
%}

// Float Store (zero immediate -- no register source to read)
pipe_class fstoreF_mem_zero(memoryF mem, immF0 src) %{
    single_instruction;
    mem : R(read);
    MS  : R;
%}

// Double Store
pipe_class fstoreD_mem_reg(memoryD mem, RegD src) %{
    instruction_count(1);
    mem : R(read);
    src : C(read);
    MS  : R;
%}

// Double Store (zero immediate -- no register source to read)
pipe_class fstoreD_mem_zero(memoryD mem, immD0 src) %{
    single_instruction;
    mem : R(read);
    MS  : R;
%}
3969 
// Integer Load (when sign bit propagation not needed)
pipe_class iload_mem(iRegI dst, memoryI mem) %{
    single_instruction;
    mem : R(read);
    dst : C(write);
    MS  : R;
%}

// Integer Load (when sign bit propagation or masking is needed)
// Result is available one stage later (M) than for the plain load (C).
pipe_class iload_mask_mem(iRegI dst, memoryI mem) %{
    single_instruction;
    mem : R(read);
    dst : M(write);
    MS  : R;
%}

// Float Load
pipe_class floadF_mem(regF dst, memoryF mem) %{
    single_instruction;
    mem : R(read);
    dst : M(write);
    MS  : R;
%}

// Float Load
pipe_class floadD_mem(regD dst, memoryD mem) %{
    instruction_count(1); multiple_bundles; // Again, unaligned argument is only multiple case
    mem : R(read);
    dst : M(write);
    MS  : R;
%}

// Memory Nop -- occupies the MS (memory) unit for one cycle
pipe_class mem_nop() %{
    single_instruction;
    MS  : R;
%}
4007 
// Constant/address formation: dst written at stage E, IALU busy for one cycle.
pipe_class sethi(iRegP dst, immI src) %{
    single_instruction;
    dst  : E(write);
    IALU : R;
%}

// Poll-pointer load (presumably the safepoint polling page -- confirm with
// users of this pipe class); uses the memory unit.
pipe_class loadPollP(iRegP poll) %{
    single_instruction;
    poll : R(read);
    MS   : R;
%}

// Unconditional branch.
pipe_class br(Universe br, label labl) %{
    single_instruction_with_delay_slot;
    BR  : R;
%}

// Conditional branch on integer condition codes.
pipe_class br_cc(Universe br, cmpOp cmp, flagsReg cr, label labl) %{
    single_instruction_with_delay_slot;
    cr    : E(read);
    BR    : R;
%}

// Branch that reads a register operand; uses both the BR and MS units.
pipe_class br_reg(Universe br, cmpOp cmp, iRegI op1, label labl) %{
    single_instruction_with_delay_slot;
    op1 : E(read);
    BR  : R;
    MS  : R;
%}

// Branch Nop -- occupies the BR unit for one cycle.
pipe_class br_nop() %{
    single_instruction;
    BR  : R;
%}
4042 
// Calls serialize the bundle and declare a pessimistic fixed latency (100)
// so the scheduler does not try to hide work across the call.
pipe_class simple_call(method meth) %{
    instruction_count(2); multiple_bundles; force_serialization;
    fixed_latency(100);
    BR  : R(1);
    MS  : R(1);
    A0  : R(1);
%}

pipe_class compiled_call(method meth) %{
    instruction_count(1); multiple_bundles; force_serialization;
    fixed_latency(100);
    MS  : R(1);
%}

pipe_class call(method meth) %{
    instruction_count(0); multiple_bundles; force_serialization;
    fixed_latency(100);
%}

// Tail call / return: single instruction with a delay slot, BR + MS units.
pipe_class tail_call(Universe ignore, label labl) %{
    single_instruction; has_delay_slot;
    fixed_latency(100);
    BR  : R(1);
    MS  : R(1);
%}

pipe_class ret(Universe ignore) %{
    single_instruction; has_delay_slot;
    BR  : R(1);
    MS  : R(1);
%}

// The real do-nothing guy
pipe_class empty( ) %{
    instruction_count(0);
%}

// Serializing multi-instruction memory operation, fixed 25-cycle latency.
pipe_class long_memory_op() %{
    instruction_count(0); multiple_bundles; force_serialization;
    fixed_latency(25);
    MS  : R(1);
%}
4085 
// Check-cast
// Reads both pointer operands, then occupies IALU and BR for two cycles
// each plus one cycle of the memory unit.
pipe_class partial_subtype_check_pipe(Universe ignore, iRegP array, iRegP match ) %{
    array : R(read);
    match  : R(read);
    IALU   : R(2);
    BR     : R(2);
    MS     : R;
%}

// Convert FPU flags into +1,0,-1
pipe_class floating_cmp( iRegI dst, regF src1, regF src2 ) %{
    src1  : E(read);
    src2  : E(read);
    dst   : E(write);
    FA    : R;
    MS    : R(2);
    BR    : R(2);
%}
4104 
// Compare for p < q, and conditionally add y
// All three operands are read at stage E; the IALU is busy for 3 cycles.
pipe_class cadd_cmpltmask( iRegI p, iRegI q, iRegI y ) %{
    p     : E(read);
    q     : E(read);
    y     : E(read);
    IALU  : R(3);
%}
4112 
// Perform a compare, then move conditionally in a branch delay slot.
pipe_class min_max( iRegI src2, iRegI srcdst ) %{
    src2   : E(read);
    srcdst : E(read);
    IALU   : R;
    BR     : R;
%}

// Define the class for the Nop node
// MachNop is scheduled with the integer-ALU nop pipe class.
define %{
   MachNop = ialu_nop;
%}
4125 
4126 %}
4127 
4128 //----------INSTRUCTIONS-------------------------------------------------------
4129 
4130 //------------Special Nop instructions for bundling - no match rules-----------
// Nop using the A0 functional unit
// These Nop_* instructs have no match rule; the bundler emits them to fill
// empty issue slots on a specific functional unit.
instruct Nop_A0() %{
  ins_pipe(ialu_nop_A0);
%}

// Nop using the A1 functional unit
instruct Nop_A1( ) %{
  ins_pipe(ialu_nop_A1);
%}

// Nop using the memory functional unit
instruct Nop_MS( ) %{
  ins_pipe(mem_nop);
%}

// Nop using the floating add functional unit
instruct Nop_FA( ) %{
  ins_pipe(fadd_nop);
%}

// Nop using the branch functional unit
instruct Nop_BR( ) %{
  ins_pipe(br_nop);
%}
4155 
//----------Load/Store/Move Instructions---------------------------------------
//----------Load Instructions--------------------------------------------------
// Load Byte (8bit signed)
instruct loadB(iRegI dst, memoryB mem) %{
  match(Set dst (LoadB mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDRSB   $dst,$mem\t! byte -> int" %}
  ins_encode %{
    // High 32 bits are harmlessly set on Aarch64
    __ ldrsb($dst$$Register, $mem$$Address);
  %}
  // Sign-extending load: use the "sign propagation / masking" load pipe class.
  ins_pipe(iload_mask_mem);
%}
4171 
// Load Byte (8bit signed) into a Long Register
instruct loadB2L(iRegL dst, memoryB mem) %{
  match(Set dst (ConvI2L (LoadB mem)));
  ins_cost(MEMORY_REF_COST);

#ifdef AARCH64
  size(4);
  format %{ "LDRSB $dst,$mem\t! byte -> long"  %}
  ins_encode %{
    __ ldrsb($dst$$Register, $mem$$Address);
  %}
#else
  // 32-bit: longs live in a register pair; sign-fill the high word with
  // an arithmetic shift of the low word by 31.
  size(8);
  format %{ "LDRSB $dst.lo,$mem\t! byte -> long\n\t"
            "ASR   $dst.hi,$dst.lo,31" %}
  ins_encode %{
    __ ldrsb($dst$$Register, $mem$$Address);
    __ mov($dst$$Register->successor(), AsmOperand($dst$$Register, asr, 31));
  %}
#endif
  ins_pipe(iload_mask_mem);
%}
4194 
// Load Unsigned Byte (8bit UNsigned) into an int reg
instruct loadUB(iRegI dst, memoryB mem) %{
  match(Set dst (LoadUB mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDRB   $dst,$mem\t! ubyte -> int" %}
  ins_encode %{
    __ ldrb($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Byte (8bit UNsigned) into a Long Register
instruct loadUB2L(iRegL dst, memoryB mem) %{
  match(Set dst (ConvI2L (LoadUB mem)));
  ins_cost(MEMORY_REF_COST);

#ifdef AARCH64
  size(4);
  format %{ "LDRB  $dst,$mem\t! ubyte -> long"  %}
  ins_encode %{
    __ ldrb($dst$$Register, $mem$$Address);
  %}
#else
  // 32-bit: zero-extend by explicitly clearing the high word of the pair.
  size(8);
  format %{ "LDRB  $dst.lo,$mem\t! ubyte -> long\n\t"
            "MOV   $dst.hi,0" %}
  ins_encode %{
    __ ldrb($dst$$Register, $mem$$Address);
    __ mov($dst$$Register->successor(), 0);
  %}
#endif
  ins_pipe(iload_mem);
%}
4230 
// Load Unsigned Byte (8 bit UNsigned) with immediate mask into Long Register
// The mask operand is a low-8-bit immediate (limmIlow8), so on 32-bit a
// single AND of the low word suffices after zeroing the high word.
instruct loadUB2L_limmI(iRegL dst, memoryB mem, limmIlow8 mask) %{
  match(Set dst (ConvI2L (AndI (LoadUB mem) mask)));

#ifdef AARCH64
  ins_cost(MEMORY_REF_COST + DEFAULT_COST);
  size(8);
  format %{ "LDRB  $dst,$mem\t! ubyte -> long\n\t"
            "AND  $dst,$dst,$mask" %}
  ins_encode %{
    __ ldrb($dst$$Register, $mem$$Address);
    __ andr($dst$$Register, $dst$$Register, limmI_low($mask$$constant, 8));
  %}
#else
  ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);
  size(12);
  format %{ "LDRB  $dst.lo,$mem\t! ubyte -> long\n\t"
            "MOV   $dst.hi,0\n\t"
            "AND  $dst.lo,$dst.lo,$mask" %}
  ins_encode %{
    __ ldrb($dst$$Register, $mem$$Address);
    __ mov($dst$$Register->successor(), 0);
    __ andr($dst$$Register, $dst$$Register, limmI_low($mask$$constant, 8));
  %}
#endif
  ins_pipe(iload_mem);
%}
4258 
// Load Short (16bit signed)
#ifdef AARCH64
// XXX This variant shouldn't be necessary if 6217251 is implemented
// Folds the AddP into the load by materializing base+off in a temp register.
instruct loadSoff(iRegI dst, memoryScaledS mem, aimmX off, iRegP tmp) %{
  match(Set dst (LoadS (AddP mem off)));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
  effect(TEMP tmp);
  size(4 * 2);

  format %{ "LDRSH   $dst,$mem+$off\t! short temp=$tmp" %}
  ins_encode %{
    Register base = reg_to_register_object($mem$$base);
    __ add($tmp$$Register, base, $off$$constant);
    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
    __ ldrsh($dst$$Register, nmem);
  %}
  ins_pipe(iload_mask_mem);
%}
#endif

instruct loadS(iRegI dst, memoryS mem) %{
  match(Set dst (LoadS mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDRSH   $dst,$mem\t! short" %}
  ins_encode %{
    __ ldrsh($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mask_mem);
%}
4290 
// Load Short (16 bit signed) to Byte (8 bit signed)
// Matches (short << 24) >> 24, which is equivalent to a signed byte load.
instruct loadS2B(iRegI dst, memoryS mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
  ins_cost(MEMORY_REF_COST);

  size(4);

  format %{ "LDRSB   $dst,$mem\t! short -> byte" %}
  ins_encode %{
    // High 32 bits are harmlessly set on Aarch64
    __ ldrsb($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Short (16bit signed) into a Long Register
instruct loadS2L(iRegL dst, memoryS mem) %{
  match(Set dst (ConvI2L (LoadS mem)));
  ins_cost(MEMORY_REF_COST);

#ifdef AARCH64
  size(4);
  format %{ "LDRSH $dst,$mem\t! short -> long"  %}
  ins_encode %{
    __ ldrsh($dst$$Register, $mem$$Address);
  %}
#else
  // 32-bit: sign-fill the high word with ASR #31 of the low word.
  size(8);
  format %{ "LDRSH $dst.lo,$mem\t! short -> long\n\t"
            "ASR   $dst.hi,$dst.lo,31" %}
  ins_encode %{
    __ ldrsh($dst$$Register, $mem$$Address);
    __ mov($dst$$Register->successor(), AsmOperand($dst$$Register, asr, 31));
  %}
#endif
  ins_pipe(iload_mask_mem);
%}
4328 
// Load Unsigned Short/Char (16bit UNsigned)

#ifdef AARCH64
// XXX This variant shouldn't be necessary if 6217251 is implemented
// Folds the AddP into the load by materializing base+off in a temp register.
instruct loadUSoff(iRegI dst, memoryScaledS mem, aimmX off, iRegP tmp) %{
  match(Set dst (LoadUS (AddP mem off)));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
  effect(TEMP tmp);
  size(4 * 2);

  format %{ "LDRH   $dst,$mem+$off\t! ushort/char temp=$tmp" %}
  ins_encode %{
    Register base = reg_to_register_object($mem$$base);
    __ add($tmp$$Register, base, $off$$constant);
    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
    __ ldrh($dst$$Register, nmem);
  %}
  ins_pipe(iload_mem);
%}
#endif

instruct loadUS(iRegI dst, memoryS mem) %{
  match(Set dst (LoadUS mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDRH   $dst,$mem\t! ushort/char" %}
  ins_encode %{
    __ ldrh($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Short/Char (16 bit UNsigned) to Byte (8 bit signed)
// Matches (char << 24) >> 24, which is equivalent to a signed byte load.
instruct loadUS2B(iRegI dst, memoryB mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadUS mem) twentyfour) twentyfour));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDRSB   $dst,$mem\t! ushort -> byte" %}
  ins_encode %{
    __ ldrsb($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mask_mem);
%}
4374 
// Load Unsigned Short/Char (16bit UNsigned) into a Long Register
instruct loadUS2L(iRegL dst, memoryS mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));
  ins_cost(MEMORY_REF_COST);

#ifdef AARCH64
  size(4);
  // LDRH zero-extends, so label the value as unsigned like the sibling rules.
  format %{ "LDRH  $dst,$mem\t! ushort/char -> long"  %}
  ins_encode %{
    __ ldrh($dst$$Register, $mem$$Address);
  %}
#else
  // 32-bit: zero-extend by explicitly clearing the high word of the pair.
  size(8);
  format %{ "LDRH  $dst.lo,$mem\t! ushort/char -> long\n\t"
            "MOV   $dst.hi, 0" %}
  ins_encode %{
    __ ldrh($dst$$Register, $mem$$Address);
    __ mov($dst$$Register->successor(), 0);
  %}
#endif
  ins_pipe(iload_mem);
%}
4397 
// Load Unsigned Short/Char (16bit UNsigned) with mask 0xFF into a Long Register
// (LoadUS & 0xFF) reduces to a single unsigned byte load.
instruct loadUS2L_immI_255(iRegL dst, memoryB mem, immI_255 mask) %{
  match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
  ins_cost(MEMORY_REF_COST);

#ifdef AARCH64
  size(4);
  format %{ "LDRB  $dst,$mem"  %}
  ins_encode %{
    __ ldrb($dst$$Register, $mem$$Address);
  %}
#else
  size(8);
  format %{ "LDRB  $dst.lo,$mem\t! \n\t"
            "MOV   $dst.hi, 0" %}
  ins_encode %{
    __ ldrb($dst$$Register, $mem$$Address);
    __ mov($dst$$Register->successor(), 0);
  %}
#endif
  ins_pipe(iload_mem);
%}
4420 
// Load Unsigned Short/Char (16bit UNsigned) with a immediate mask into a Long Register
instruct loadUS2L_limmI(iRegL dst, memoryS mem, limmI mask) %{
  match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
#ifdef AARCH64
  ins_cost(MEMORY_REF_COST + 1*DEFAULT_COST);

  size(8);
  format %{ "LDRH   $dst,$mem\t! ushort/char & mask -> long\n\t"
            "AND    $dst,$dst,$mask" %}
  ins_encode %{
    __ ldrh($dst$$Register, $mem$$Address);
    __ andr($dst$$Register, $dst$$Register, (uintx)$mask$$constant);
  %}
#else
  ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);

  // 32-bit: clear the high word, then apply the mask to the low word.
  size(12);
  format %{ "LDRH   $dst,$mem\t! ushort/char & mask -> long\n\t"
            "MOV    $dst.hi, 0\n\t"
            "AND    $dst,$dst,$mask" %}
  ins_encode %{
    __ ldrh($dst$$Register, $mem$$Address);
    __ mov($dst$$Register->successor(), 0);
    __ andr($dst$$Register, $dst$$Register, $mask$$constant);
  %}
#endif
  ins_pipe(iload_mem);
%}
4449 
// Load Integer

#ifdef AARCH64
// XXX This variant shouldn't be necessary if 6217251 is implemented
// Folds the AddP into the load by materializing base+off in a temp register.
instruct loadIoff(iRegI dst, memoryScaledI mem, aimmX off, iRegP tmp) %{
  match(Set dst (LoadI (AddP mem off)));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
  effect(TEMP tmp);
  size(4 * 2);

  format %{ "ldr_s32 $dst,$mem+$off\t! int temp=$tmp" %}
  ins_encode %{
    Register base = reg_to_register_object($mem$$base);
    __ add($tmp$$Register, base, $off$$constant);
    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
    __ ldr_s32($dst$$Register, nmem);
  %}
  ins_pipe(iload_mem);
%}
#endif

instruct loadI(iRegI dst, memoryI mem) %{
  match(Set dst (LoadI mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "ldr_s32 $dst,$mem\t! int" %}
  ins_encode %{
    __ ldr_s32($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mem);
%}
4482 
// Load Integer to Byte (8 bit signed)
// The loadI2* rules fold a shift/mask narrowing of a LoadI into a single
// narrower load instruction.
instruct loadI2B(iRegI dst, memoryS mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
  ins_cost(MEMORY_REF_COST);

  size(4);

  format %{ "LDRSB   $dst,$mem\t! int -> byte" %}
  ins_encode %{
    __ ldrsb($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Integer to Unsigned Byte (8 bit UNsigned)
instruct loadI2UB(iRegI dst, memoryB mem, immI_255 mask) %{
  match(Set dst (AndI (LoadI mem) mask));
  ins_cost(MEMORY_REF_COST);

  size(4);

  format %{ "LDRB   $dst,$mem\t! int -> ubyte" %}
  ins_encode %{
    __ ldrb($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Integer to Short (16 bit signed)
instruct loadI2S(iRegI dst, memoryS mem, immI_16 sixteen) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDRSH   $dst,$mem\t! int -> short" %}
  ins_encode %{
    __ ldrsh($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Integer to Unsigned Short (16 bit UNsigned)
instruct loadI2US(iRegI dst, memoryS mem, immI_65535 mask) %{
  match(Set dst (AndI (LoadI mem) mask));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDRH   $dst,$mem\t! int -> ushort/char" %}
  ins_encode %{
    __ ldrh($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mask_mem);
%}
4536 
// Load Integer into a Long Register
instruct loadI2L(iRegL dst, memoryI mem) %{
  match(Set dst (ConvI2L (LoadI mem)));
#ifdef AARCH64
  ins_cost(MEMORY_REF_COST);

  size(4);
  // NOTE(review): "$dst.lo" in this AArch64 format looks like a leftover
  // from the 32-bit variant -- the destination is a full 64-bit register.
  format %{ "LDRSW $dst.lo,$mem\t! int -> long"  %}
  ins_encode %{
    __ ldr_s32($dst$$Register, $mem$$Address);
  %}
#else
  ins_cost(MEMORY_REF_COST);

  // 32-bit: sign-fill the high word with ASR #31 of the low word.
  size(8);
  format %{ "LDR   $dst.lo,$mem\t! int -> long\n\t"
            "ASR   $dst.hi,$dst.lo,31\t! int->long" %}
  ins_encode %{
    __ ldr($dst$$Register, $mem$$Address);
    __ mov($dst$$Register->successor(), AsmOperand($dst$$Register, asr, 31));
  %}
#endif
  ins_pipe(iload_mask_mem);
%}
4561 
// Load Integer with mask 0xFF into a Long Register
// (LoadI & 0xFF) reduces to a single unsigned byte load.
instruct loadI2L_immI_255(iRegL dst, memoryB mem, immI_255 mask) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
#ifdef AARCH64
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDRB   $dst.lo,$mem\t! int & 0xFF -> long"  %}
  ins_encode %{
    __ ldrb($dst$$Register, $mem$$Address);
  %}
#else
  ins_cost(MEMORY_REF_COST);

  size(8);
  format %{ "LDRB   $dst.lo,$mem\t! int & 0xFF -> long\n\t"
            "MOV    $dst.hi, 0" %}
  ins_encode %{
    __ ldrb($dst$$Register, $mem$$Address);
    __ mov($dst$$Register->successor(), 0);
  %}
#endif
  ins_pipe(iload_mem);
%}

// Load Integer with mask 0xFFFF into a Long Register
// (LoadI & 0xFFFF) reduces to a single unsigned halfword load.
instruct loadI2L_immI_65535(iRegL dst, memoryS mem, immI_65535 mask) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  ins_cost(MEMORY_REF_COST);

#ifdef AARCH64
  size(4);
  format %{ "LDRH   $dst,$mem\t! int & 0xFFFF -> long" %}
  ins_encode %{
    __ ldrh($dst$$Register, $mem$$Address);
  %}
#else
  size(8);
  format %{ "LDRH   $dst,$mem\t! int & 0xFFFF -> long\n\t"
            "MOV    $dst.hi, 0" %}
  ins_encode %{
    __ ldrh($dst$$Register, $mem$$Address);
    __ mov($dst$$Register->successor(), 0);
  %}
#endif
  ins_pipe(iload_mask_mem);
%}
4609 
#ifdef AARCH64
// Load Integer with an immediate mask into a Long Register
instruct loadI2L_limmI(iRegL dst, memoryI mem, limmI mask) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  ins_cost(MEMORY_REF_COST + 1*DEFAULT_COST);

  size(8);
  format %{ "LDRSW $dst,$mem\t! int -> long\n\t"
            "AND   $dst,$dst,$mask" %}

  ins_encode %{
    __ ldr_s32($dst$$Register, $mem$$Address);
    __ andr($dst$$Register, $dst$$Register, (uintx)$mask$$constant);
  %}
  ins_pipe(iload_mem);
%}
#else
// Load Integer with a 31-bit immediate mask into a Long Register
// The 31-bit (non-negative) mask guarantees the result's high word is zero.
instruct loadI2L_limmU31(iRegL dst, memoryI mem, limmU31 mask) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);

  size(12);
  format %{ "LDR   $dst.lo,$mem\t! int -> long\n\t"
            "MOV    $dst.hi, 0\n\t"
            "AND   $dst,$dst,$mask" %}

  ins_encode %{
    __ ldr($dst$$Register, $mem$$Address);
    __ mov($dst$$Register->successor(), 0);
    __ andr($dst$$Register, $dst$$Register, $mask$$constant);
  %}
  ins_pipe(iload_mem);
%}
#endif
4645 
4646 #ifdef AARCH64
// Load Integer with mask into a Long Register
// FIXME: use signedRegI mask, remove tmp?
instruct loadI2L_immI(iRegL dst, memoryI mem, immI mask, iRegI tmp) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  effect(TEMP dst, TEMP tmp);

  ins_cost(MEMORY_REF_COST + 3*DEFAULT_COST);
  // Operand order follows the emitted code: LDRSW dst, mem.
  format %{ "LDRSW    $dst,$mem\t! int & mask -> long\n\t"
            "MOV_SLOW $tmp,$mask\n\t"
            "AND      $dst,$tmp,$dst" %}
  ins_encode %{
    __ ldrsw($dst$$Register, $mem$$Address);
    __ mov_slow($tmp$$Register, $mask$$constant);
    __ andr($dst$$Register, $dst$$Register, $tmp$$Register);
  %}
  ins_pipe(iload_mem);
%}
4664 #else
// Load Integer with a 31-bit mask into a Long Register
// FIXME: use iRegI mask, remove tmp?
instruct loadI2L_immU31(iRegL dst, memoryI mem, immU31 mask, iRegI tmp) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  effect(TEMP dst, TEMP tmp);

  ins_cost(MEMORY_REF_COST + 4*DEFAULT_COST);
  size(20);
  // Operand order follows the emitted code: LDR dst, mem.
  format %{ "LDR      $dst,$mem\t! int & 31-bit mask -> long\n\t"
            "MOV      $dst.hi, 0\n\t"
            "MOV_SLOW $tmp,$mask\n\t"
            "AND      $dst,$tmp,$dst" %}
  ins_encode %{
    __ ldr($dst$$Register, $mem$$Address);
    __ mov($dst$$Register->successor(), 0);
    __ mov_slow($tmp$$Register, $mask$$constant);
    __ andr($dst$$Register, $dst$$Register, $tmp$$Register);
  %}
  ins_pipe(iload_mem);
%}
4685 #endif
4686 
4687 // Load Unsigned Integer into a Long Register
4688 instruct loadUI2L(iRegL dst, memoryI mem, immL_32bits mask) %{
4689   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
4690   ins_cost(MEMORY_REF_COST);
4691 
4692 #ifdef AARCH64
4693 //size(4);
4694   format %{ "LDR_w $dst,$mem\t! uint -> long" %}
4695   ins_encode %{
4696     __ ldr_w($dst$$Register, $mem$$Address);
4697   %}
4698 #else
4699   size(8);
4700   format %{ "LDR   $dst.lo,$mem\t! uint -> long\n\t"
4701             "MOV   $dst.hi,0" %}
4702   ins_encode %{
4703     __ ldr($dst$$Register, $mem$$Address);
4704     __ mov($dst$$Register->successor(), 0);
4705   %}
4706 #endif
4707   ins_pipe(iload_mem);
4708 %}
4709 
// Load Long

#ifdef AARCH64
// XXX This variant shouldn't be necessary if 6217251 is implemented
// Folds the AddP into the load by materializing base+off in a temp register.
instruct loadLoff(iRegLd dst, memoryScaledL mem, aimmX off, iRegP tmp) %{
  match(Set dst (LoadL (AddP mem off)));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
  effect(TEMP tmp);
  size(4 * 2);

  format %{ "LDR    $dst,$mem+$off\t! long temp=$tmp" %}
  ins_encode %{
    Register base = reg_to_register_object($mem$$base);
    __ add($tmp$$Register, base, $off$$constant);
    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
    __ ldr($dst$$Register, nmem);
  %}
  ins_pipe(iload_mem);
%}
#endif

instruct loadL(iRegLd dst, memoryL mem ) %{
#ifdef AARCH64
  // already atomic for Aarch64
#else
  predicate(!((LoadLNode*)n)->require_atomic_access());
#endif
  match(Set dst (LoadL mem));
  effect(TEMP dst);
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "ldr_64  $dst,$mem\t! long" %}
  ins_encode %{
    __ ldr_64($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mem);
%}
4748 
4749 #ifndef AARCH64
// Non-atomic long load as two 32-bit loads. If the low destination register
// aliases the address base register, the loads are emitted in reverse order
// so the base is not clobbered before the second load.
instruct loadL_2instr(iRegL dst, memorylong mem ) %{
  predicate(!((LoadLNode*)n)->require_atomic_access());
  match(Set dst (LoadL mem));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST);

  size(8);
  format %{ "LDR    $dst.lo,$mem \t! long order of instrs reversed if $dst.lo == base($mem)\n\t"
            "LDR    $dst.hi,$mem+4 or $mem" %}
  ins_encode %{
    Address Amemlo = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
    Address Amemhi = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp + 4, relocInfo::none);

    if ($dst$$Register == reg_to_register_object($mem$$base)) {
      __ ldr($dst$$Register->successor(), Amemhi);
      __ ldr($dst$$Register, Amemlo);
    } else {
      __ ldr($dst$$Register, Amemlo);
      __ ldr($dst$$Register->successor(), Amemhi);
    }
  %}
  ins_pipe(iload_mem);
%}
4772 
// Long load for require_atomic_access() via LDMIA of the register pair.
instruct loadL_volatile(iRegL dst, indirect mem ) %{
  predicate(((LoadLNode*)n)->require_atomic_access());
  match(Set dst (LoadL mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDMIA    $dst,$mem\t! long" %}
  ins_encode %{
    // FIXME: why is ldmia considered atomic?  Should be ldrexd
    // Build the register set {dst, dst+1} for the LDMIA.
    RegisterSet set($dst$$Register);
    set = set | reg_to_register_object($dst$$reg + 1);
    __ ldmia(reg_to_register_object($mem$$base), set);
  %}
  ins_pipe(iload_mem);
%}
4788 
// Long load for require_atomic_access() through the FP unit: a single
// 64-bit FLDD, then FMRRD to move the value into the core register pair.
instruct loadL_volatile_fp(iRegL dst, memoryD mem ) %{
  predicate(((LoadLNode*)n)->require_atomic_access());
  match(Set dst (LoadL mem));
  ins_cost(MEMORY_REF_COST);

  size(8);
  format %{ "FLDD      S14, $mem\n\t"
            "FMRRD    $dst, S14\t! long" %}
  ins_encode %{
    __ fldd(S14, $mem$$Address);
    __ fmrrd($dst$$Register, $dst$$Register->successor(), S14);
  %}
  ins_pipe(iload_mem);
%}
4803 
// Unaligned long load as two 32-bit loads; same base-register aliasing
// precaution as loadL_2instr above.
instruct loadL_unaligned(iRegL dst, memorylong mem ) %{
  match(Set dst (LoadL_unaligned mem));
  ins_cost(MEMORY_REF_COST);

  size(8);
  format %{ "LDR    $dst.lo,$mem\t! long order of instrs reversed if $dst.lo == base($mem)\n\t"
            "LDR    $dst.hi,$mem+4" %}
  ins_encode %{
    Address Amemlo = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
    Address Amemhi = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp + 4, relocInfo::none);

    if ($dst$$Register == reg_to_register_object($mem$$base)) {
      __ ldr($dst$$Register->successor(), Amemhi);
      __ ldr($dst$$Register, Amemlo);
    } else {
      __ ldr($dst$$Register, Amemlo);
      __ ldr($dst$$Register->successor(), Amemhi);
    }
  %}
  ins_pipe(iload_mem);
%}
#endif // !AARCH64
4826 
// Load Range (unsigned 32-bit load of the range value)
instruct loadRange(iRegI dst, memoryI mem) %{
  match(Set dst (LoadRange mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDR_u32 $dst,$mem\t! range" %}
  ins_encode %{
    __ ldr_u32($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mem);
%}
4839 
// Load Pointer

#ifdef AARCH64
// XXX This variant shouldn't be necessary if 6217251 is implemented
// Folds the AddP into the load by materializing base+off in a temp register.
instruct loadPoff(iRegP dst, memoryScaledP mem, aimmX off, iRegP tmp) %{
  match(Set dst (LoadP (AddP mem off)));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
  effect(TEMP tmp);
  size(4 * 2);

  format %{ "LDR    $dst,$mem+$off\t! ptr temp=$tmp" %}
  ins_encode %{
    Register base = reg_to_register_object($mem$$base);
    __ add($tmp$$Register, base, $off$$constant);
    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
    __ ldr($dst$$Register, nmem);
  %}
  ins_pipe(iload_mem);
%}
#endif

instruct loadP(iRegP dst, memoryP mem) %{
  match(Set dst (LoadP mem));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "LDR   $dst,$mem\t! ptr" %}
  ins_encode %{
    __ ldr($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mem);
%}
4872 
#ifdef XXX
// FIXME XXXX
// Disabled experimental variant (compiled out by the #ifdef XXX guard):
// loads a pointer into a stack-pointer register via a temp.
//instruct loadSP(iRegP dst, memoryP mem) %{
instruct loadSP(SPRegP dst, memoryP mem, iRegP tmp) %{
  match(Set dst (LoadP mem));
  effect(TEMP tmp);
  ins_cost(MEMORY_REF_COST+1);
  size(8);

  format %{ "LDR   $tmp,$mem\t! ptr\n\t"
            "MOV   $dst,$tmp\t! ptr" %}
  ins_encode %{
    __ ldr($tmp$$Register, $mem$$Address);
    __ mov($dst$$Register, $tmp$$Register);
  %}
  ins_pipe(iload_mem);
%}
#endif
4891 
#ifdef _LP64
// Load Compressed Pointer

// XXX This variant shouldn't be necessary if 6217251 is implemented
// Folds the AddP into the load by materializing base+off in a temp register.
instruct loadNoff(iRegN dst, memoryScaledI mem, aimmX off, iRegP tmp) %{
  match(Set dst (LoadN (AddP mem off)));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
  effect(TEMP tmp);
  size(4 * 2);

  format %{ "ldr_u32 $dst,$mem+$off\t! compressed ptr temp=$tmp" %}
  ins_encode %{
    Register base = reg_to_register_object($mem$$base);
    __ add($tmp$$Register, base, $off$$constant);
    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
    __ ldr_u32($dst$$Register, nmem);
  %}
  ins_pipe(iload_mem);
%}

instruct loadN(iRegN dst, memoryI mem) %{
  match(Set dst (LoadN mem));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "ldr_u32 $dst,$mem\t! compressed ptr" %}
  ins_encode %{
    __ ldr_u32($dst$$Register, $mem$$Address);
  %}
  ins_pipe(iload_mem);
%}
#endif
4924 
4925 // Load Klass Pointer
4926 instruct loadKlass(iRegP dst, memoryI mem) %{
4927   match(Set dst (LoadKlass mem));
4928   ins_cost(MEMORY_REF_COST);
4929   size(4);
4930 
4931   format %{ "LDR   $dst,$mem\t! klass ptr" %}
4932   ins_encode %{
4933     __ ldr($dst$$Register, $mem$$Address);
4934   %}
4935   ins_pipe(iload_mem);
4936 %}
4937 
4938 #ifdef _LP64
4939 // Load narrow Klass Pointer
4940 instruct loadNKlass(iRegN dst, memoryI mem) %{
4941   match(Set dst (LoadNKlass mem));
4942   ins_cost(MEMORY_REF_COST);
4943   size(4);
4944 
4945   format %{ "ldr_u32 $dst,$mem\t! compressed klass ptr" %}
4946   ins_encode %{
4947     __ ldr_u32($dst$$Register, $mem$$Address);
4948   %}
4949   ins_pipe(iload_mem);
4950 %}
4951 #endif
4952 
#ifdef AARCH64
// XXX This variant shouldn't be necessary if 6217251 is implemented
// Load double from (mem + off): compute mem.base+off into tmp, then load
// through an address rebuilt with tmp as the base.
instruct loadDoff(regD dst, memoryScaledD mem, aimmX off, iRegP tmp) %{
  match(Set dst (LoadD (AddP mem off)));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
  effect(TEMP tmp);
  size(4 * 2);

  format %{ "ldr    $dst,$mem+$off\t! double temp=$tmp" %}
  ins_encode %{
    Register base = reg_to_register_object($mem$$base);
    __ add($tmp$$Register, base, $off$$constant);
    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
    __ ldr_d($dst$$FloatRegister, nmem);
  %}
  ins_pipe(floadD_mem);
%}
#endif

// Load Double: single FP load.
instruct loadD(regD dst, memoryD mem) %{
  match(Set dst (LoadD mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  // FIXME: needs to be atomic, but  ARMv7 A.R.M. guarantees
  // only LDREXD and STREXD are 64-bit single-copy atomic
  format %{ "FLDD   $dst,$mem" %}
  ins_encode %{
    __ ldr_double($dst$$FloatRegister, $mem$$Address);
  %}
  ins_pipe(floadD_mem);
%}

#ifndef AARCH64
// Load Double - UNaligned
// Two 32-bit FP loads into the low/high halves of the register pair, so
// no 64-bit alignment of the address is required.
instruct loadD_unaligned(regD_low dst, memoryF2 mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(MEMORY_REF_COST*2+DEFAULT_COST);
  size(8);
  format %{ "FLDS    $dst.lo,$mem\t! misaligned double\n"
          "\tFLDS    $dst.hi,$mem+4\t!" %}
  ins_encode %{
    Address Amemlo = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
    Address Amemhi = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp + 4, relocInfo::none);
      __ flds($dst$$FloatRegister, Amemlo);
      __ flds($dst$$FloatRegister->successor(), Amemhi);
  %}
  ins_pipe(iload_mem);
%}
#endif

#ifdef AARCH64
// XXX This variant shouldn't be necessary if 6217251 is implemented
// Load float from (mem + off), same base+off folding as loadDoff above.
instruct loadFoff(regF dst, memoryScaledF mem, aimmX off, iRegP tmp) %{
  match(Set dst (LoadF (AddP mem off)));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
  effect(TEMP tmp);
  size(4 * 2);

  format %{ "ldr    $dst,$mem+$off\t! float temp=$tmp" %}
  ins_encode %{
    Register base = reg_to_register_object($mem$$base);
    __ add($tmp$$Register, base, $off$$constant);
    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
    __ ldr_s($dst$$FloatRegister, nmem);
  %}
  ins_pipe(floadF_mem);
%}
#endif

// Load Float: single FP load.
instruct loadF(regF dst, memoryF mem) %{
  match(Set dst (LoadF mem));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "FLDS    $dst,$mem" %}
  ins_encode %{
    __ ldr_float($dst$$FloatRegister, $mem$$Address);
  %}
  ins_pipe(floadF_mem);
%}
5034 
#ifdef AARCH64
// Int constant encodable as an ORR logical immediate: ORR with ZR.
instruct load_limmI(iRegI dst, limmI src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST + 1); // + 1 because MOV is preferred
  format %{ "ORR_w  $dst, ZR, $src\t! int"  %}
  ins_encode %{
    __ orr_w($dst$$Register, ZR, (uintx)$src$$constant);
  %}
  ins_pipe(ialu_imm);
%}
#endif

// // Load Constant
// Generic int constant: mov_slow may expand to multiple instructions.
instruct loadConI( iRegI dst, immI src ) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "MOV_SLOW    $dst, $src" %}
  ins_encode %{
    __ mov_slow($dst$$Register, $src$$constant);
  %}
  ins_pipe(ialu_hi_lo_reg);
%}

// Int constant encodable as a single MOV immediate.
instruct loadConIMov( iRegI dst, immIMov src ) %{
  match(Set dst src);
  size(4);
  format %{ "MOV    $dst, $src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$constant);
  %}
  ins_pipe(ialu_imm);
%}

#ifndef AARCH64
// Int constant whose bitwise complement is encodable: single MVN.
instruct loadConIMovn( iRegI dst, immIRotn src ) %{
  match(Set dst src);
  size(4);
  format %{ "MVN    $dst, ~$src" %}
  ins_encode %{
    __ mvn($dst$$Register, ~$src$$constant);
  %}
  ins_pipe(ialu_imm_n);
%}
#endif

// 16-bit int constant: single MOVW (ARM) / MOVZ_w (AArch64).
instruct loadConI16( iRegI dst, immI16 src ) %{
  match(Set dst src);
  size(4);
#ifdef AARCH64
  format %{ "MOVZ_w  $dst, $src" %}
#else
  format %{ "MOVW    $dst, $src" %}
#endif
  ins_encode %{
#ifdef AARCH64
    __ mov_w($dst$$Register, $src$$constant);
#else
    __ movw($dst$$Register, $src$$constant);
#endif
  %}
  ins_pipe(ialu_imm_n);
%}

// Pointer constant: oop and metadata constants need relocation-aware
// moves; anything else goes through mov_slow.
instruct loadConP(iRegP dst, immP src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "MOV_SLOW    $dst,$src\t!ptr" %}
  ins_encode %{
    relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc();
    intptr_t val = $src$$constant;
    if (constant_reloc == relocInfo::oop_type) {
      __ mov_oop($dst$$Register, (jobject)val);
    } else if (constant_reloc == relocInfo::metadata_type) {
      __ mov_metadata($dst$$Register, (Metadata*)val);
    } else {
      __ mov_slow($dst$$Register, val);
    }
  %}
  ins_pipe(loadConP);
%}


// Polling-page pointer constant (immP_poll), materialized with mov_slow.
instruct loadConP_poll(iRegP dst, immP_poll src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST);
  format %{ "MOV_SLOW    $dst,$src\t!ptr" %}
  ins_encode %{
      __ mov_slow($dst$$Register, $src$$constant);
  %}
  ins_pipe(loadConP_poll);
%}
5126 
#ifdef AARCH64
// Null pointer constant: copy from the zero register.
instruct loadConP0(iRegP dst, immP0 src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST);
  format %{ "MOV    $dst,ZR\t!ptr" %}
  ins_encode %{
    __ mov($dst$$Register, ZR);
  %}
  ins_pipe(ialu_none);
%}

// Narrow oop constant, emitted with relocation via set_narrow_oop.
instruct loadConN(iRegN dst, immN src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET    $dst,$src\t! compressed ptr" %}
  ins_encode %{
    Register dst = $dst$$Register;
    // FIXME: use $constanttablebase?
    __ set_narrow_oop(dst, (jobject)$src$$constant);
  %}
  ins_pipe(ialu_hi_lo_reg);
%}

// Null narrow oop constant: copy from the zero register.
instruct loadConN0(iRegN dst, immN0 src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST);
  format %{ "MOV    $dst,ZR\t! compressed ptr" %}
  ins_encode %{
    __ mov($dst$$Register, ZR);
  %}
  ins_pipe(ialu_none);
%}

// Narrow klass constant, emitted with relocation via set_narrow_klass.
instruct loadConNKlass(iRegN dst, immNKlass src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET    $dst,$src\t! compressed klass ptr" %}
  ins_encode %{
    Register dst = $dst$$Register;
    // FIXME: use $constanttablebase?
    __ set_narrow_klass(dst, (Klass*)$src$$constant);
  %}
  ins_pipe(ialu_hi_lo_reg);
%}

// Long constant encodable as an ORR logical immediate.
instruct load_limmL(iRegL dst, limmL src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST);
  format %{ "ORR    $dst, ZR, $src\t! long"  %}
  ins_encode %{
    __ orr($dst$$Register, ZR, (uintx)$src$$constant);
  %}
  ins_pipe(loadConL);
%}
// Long constant encodable as a single MOV.
instruct load_immLMov(iRegL dst, immLMov src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST);
  format %{ "MOV    $dst, $src\t! long"  %}
  ins_encode %{
    __ mov($dst$$Register, $src$$constant);
  %}
  ins_pipe(loadConL);
%}
// Generic long constant: mov_slow, costed for the worst-case expansion.
instruct loadConL(iRegL dst, immL src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 4); // worst case
  format %{ "mov_slow   $dst, $src\t! long"  %}
  ins_encode %{
    // FIXME: use $constanttablebase?
    __ mov_slow($dst$$Register, $src$$constant);
  %}
  ins_pipe(loadConL);
%}
#else
// 32-bit ARM: materialize each 32-bit half of the long into the
// register pair (lo = dst, hi = dst's successor).
instruct loadConL(iRegL dst, immL src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 4);
  format %{ "MOV_SLOW   $dst.lo, $src & 0x0FFFFFFFFL \t! long\n\t"
            "MOV_SLOW   $dst.hi, $src >> 32" %}
  ins_encode %{
    __ mov_slow(reg_to_register_object($dst$$reg), $src$$constant & 0x0FFFFFFFFL);
    __ mov_slow(reg_to_register_object($dst$$reg + 1), ((julong)($src$$constant)) >> 32);
  %}
  ins_pipe(loadConL);
%}
5212 
// 16-bit long constant: MOVW the low word, zero the high word.
// Fix: the second format line previously ended with a stray " \n\t",
// which emitted a dangling blank continuation line in the
// PrintOptoAssembly output; the format now ends after the last operand.
instruct loadConL16( iRegL dst, immL16 src ) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 2);

  size(8);
  format %{ "MOVW    $dst.lo, $src \n\t"
            "MOVW    $dst.hi, 0" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant);        // low 32 bits
    __ movw($dst$$Register->successor(), 0);        // high 32 bits = 0
  %}
  ins_pipe(ialu_imm);
%}
5226 #endif
5227 
// Float constant encodable as a VFP 8-bit immediate: single FCONSTS.
instruct loadConF_imm8(regF dst, imm8F src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST);
  size(4);

  format %{ "FCONSTS      $dst, $src"%}

  ins_encode %{
    __ fconsts($dst$$FloatRegister, Assembler::float_num($src$$constant).imm8());
  %}
  ins_pipe(loadConFD); // FIXME
%}

#ifdef AARCH64
// Float constant delivered as its raw bit pattern in an int register.
instruct loadIConF(iRegI dst, immF src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 2);

  format %{ "MOV_SLOW  $dst, $src\t! loadIConF"  %}

  ins_encode %{
    // FIXME revisit once 6961697 is in
    // Union reinterprets the float's bits as an int without conversion.
    union {
      jfloat f;
      int i;
    } v;
    v.f = $src$$constant;
    __ mov_slow($dst$$Register, v.i);
  %}
  ins_pipe(ialu_imm);
%}
#endif

// Generic float constant: build the bit pattern in tmp, then transfer it
// to the FP register with FMSR.
instruct loadConF(regF dst, immF src, iRegI tmp) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 2);
  effect(TEMP tmp);
  size(3*4);

  format %{ "MOV_SLOW  $tmp, $src\n\t"
            "FMSR      $dst, $tmp"%}

  ins_encode %{
    // FIXME revisit once 6961697 is in
    // Union reinterprets the float's bits as an int without conversion.
    union {
      jfloat f;
      int i;
    } v;
    v.f = $src$$constant;
    __ mov_slow($tmp$$Register, v.i);
    __ fmsr($dst$$FloatRegister, $tmp$$Register);
  %}
  ins_pipe(loadConFD); // FIXME
%}

// Double constant encodable as a VFP 8-bit immediate: single FCONSTD.
instruct loadConD_imm8(regD dst, imm8D src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST);
  size(4);

  format %{ "FCONSTD      $dst, $src"%}

  ins_encode %{
    __ fconstd($dst$$FloatRegister, Assembler::double_num($src$$constant).imm8());
  %}
  ins_pipe(loadConFD); // FIXME
%}

// Generic double constant, loaded from the constant table.  If the table
// offset does not fit the FP-load addressing mode, the address is first
// formed in tmp.
instruct loadConD(regD dst, immD src, iRegP tmp) %{
  match(Set dst src);
  effect(TEMP tmp);
  ins_cost(MEMORY_REF_COST);
  format %{ "FLDD  $dst, [$constanttablebase + $constantoffset]\t! load from constant table: double=$src" %}

  ins_encode %{
    Register r = $constanttablebase;
    int offset  = $constantoffset($src);
    if (!is_memoryD(offset)) {                // can't use a predicate
                                              // in load constant instructs
      __ add_slow($tmp$$Register, r, offset);
      r = $tmp$$Register;
      offset = 0;
    }
    __ ldr_double($dst$$FloatRegister, Address(r, offset));
  %}
  ins_pipe(loadConFD);
%}
5315 
// Prefetch instructions.
// Must be safe to execute with invalid address (cannot fault).

// Allocation prefetch, MP systems: PLDW (prefetch-for-write) on ARM,
// PRFM PSTL1KEEP on AArch64.
instruct prefetchAlloc_mp( memoryP mem ) %{
  predicate(os::is_MP());
  match( PrefetchAllocation mem );
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "PLDW $mem\t! Prefetch allocation" %}
  ins_encode %{
#ifdef AARCH64
    __ prfm(pstl1keep, $mem$$Address);
#else
    __ pldw($mem$$Address);
#endif
  %}
  ins_pipe(iload_mem);
%}

// Allocation prefetch, uniprocessor systems: plain PLD on ARM.
instruct prefetchAlloc_sp( memoryP mem ) %{
  predicate(!os::is_MP());
  match( PrefetchAllocation mem );
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "PLD $mem\t! Prefetch allocation" %}
  ins_encode %{
#ifdef AARCH64
    __ prfm(pstl1keep, $mem$$Address);
#else
    __ pld($mem$$Address);
#endif
  %}
  ins_pipe(iload_mem);
%}
5352 
//----------Store Instructions-------------------------------------------------
// Store Byte
instruct storeB(memoryB mem, store_RegI src) %{
  match(Set mem (StoreB mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STRB    $src,$mem\t! byte" %}
  ins_encode %{
    __ strb($src$$Register, $mem$$Address);
  %}
  ins_pipe(istore_mem_reg);
%}

// Card-mark byte store: same STRB encoding as storeB, distinct ideal node.
instruct storeCM(memoryB mem, store_RegI src) %{
  match(Set mem (StoreCM mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STRB    $src,$mem\t! CMS card-mark byte" %}
  ins_encode %{
    __ strb($src$$Register, $mem$$Address);
  %}
  ins_pipe(istore_mem_reg);
%}

// Store Char/Short

#ifdef AARCH64
// XXX This variant shouldn't be necessary if 6217251 is implemented
// Store short to (mem + off): base+off is materialized in tmp, then the
// address is rebuilt with tmp as the base.
instruct storeCoff(store_RegI src, memoryScaledS mem, aimmX off, iRegP tmp) %{
  match(Set mem (StoreC (AddP mem off) src));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
  effect(TEMP tmp);
  size(4 * 2);

  format %{ "STRH    $src,$mem+$off\t! short temp=$tmp" %}
  ins_encode %{
    Register base = reg_to_register_object($mem$$base);
    __ add($tmp$$Register, base, $off$$constant);
    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
    __ strh($src$$Register, nmem);
  %}
  ins_pipe(istore_mem_reg);
%}
#endif

// Store char/short: single halfword store.
instruct storeC(memoryS mem, store_RegI src) %{
  match(Set mem (StoreC mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STRH    $src,$mem\t! short" %}
  ins_encode %{
    __ strh($src$$Register, $mem$$Address);
  %}
  ins_pipe(istore_mem_reg);
%}

// Store Integer

#ifdef AARCH64
// XXX This variant shouldn't be necessary if 6217251 is implemented
// Store int to (mem + off), same base+off folding as storeCoff above.
instruct storeIoff(store_RegI src, memoryScaledI mem, aimmX off, iRegP tmp) %{
  match(Set mem (StoreI (AddP mem off) src));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
  effect(TEMP tmp);
  size(4 * 2);

  format %{ "str_32 $src,$mem+$off\t! int temp=$tmp" %}
  ins_encode %{
    Register base = reg_to_register_object($mem$$base);
    __ add($tmp$$Register, base, $off$$constant);
    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
    __ str_32($src$$Register, nmem);
  %}
  ins_pipe(istore_mem_reg);
%}
#endif

// Store int: single 32-bit store.
instruct storeI(memoryI mem, store_RegI src) %{
  match(Set mem (StoreI mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "str_32 $src,$mem" %}
  ins_encode %{
    __ str_32($src$$Register, $mem$$Address);
  %}
  ins_pipe(istore_mem_reg);
%}
5444 
// Store Long

#ifdef AARCH64
// XXX This variant shouldn't be necessary if 6217251 is implemented
// Store long to (mem + off): base+off materialized in tmp, address
// rebuilt with tmp as the base.
instruct storeLoff(store_RegLd src, memoryScaledL mem, aimmX off, iRegP tmp) %{
  match(Set mem (StoreL (AddP mem off) src));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
  effect(TEMP tmp);
  size(4 * 2);

  format %{ "str_64 $src,$mem+$off\t! long temp=$tmp" %}
  ins_encode %{
    Register base = reg_to_register_object($mem$$base);
    __ add($tmp$$Register, base, $off$$constant);
    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
    __ str_64($src$$Register, nmem);
  %}
  ins_pipe(istore_mem_reg);
%}
#endif
5465 
// Store Long as a single 64-bit store.  On 32-bit ARM the predicate
// restricts this to stores that do not require atomic access; on AArch64
// no predicate is needed (see comment below).
// Fix: the format string previously ended with a stray "\n\t", which
// emitted a dangling blank continuation line in PrintOptoAssembly output.
instruct storeL(memoryL mem, store_RegLd src) %{
#ifdef AARCH64
  // already atomic for Aarch64
#else
  predicate(!((StoreLNode*)n)->require_atomic_access());
#endif
  match(Set mem (StoreL mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "str_64  $src,$mem\t! long" %}

  ins_encode %{
    __ str_64($src$$Register, $mem$$Address);
  %}
  ins_pipe(istore_mem_reg);
%}
5483 
#ifndef AARCH64
// Store long as two word stores: low half at disp, high half at disp+4.
// Not atomic; the predicate excludes volatile (atomic-access) longs.
instruct storeL_2instr(memorylong mem, iRegL src) %{
  predicate(!((StoreLNode*)n)->require_atomic_access());
  match(Set mem (StoreL mem src));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST);

  size(8);
  format %{ "STR    $src.lo,$mem\t! long\n\t"
            "STR    $src.hi,$mem+4" %}

  ins_encode %{
    Address Amemlo = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
    Address Amemhi = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp + 4, relocInfo::none);
    __ str($src$$Register, Amemlo);
    __ str($src$$Register->successor(), Amemhi);
  %}
  ins_pipe(istore_mem_reg);
%}
5502 
// Volatile (atomic-access) long store: both halves of the register pair
// are stored with one STMIA through the memory operand's base register.
// Fix: the format previously printed "STMIA $src,$mem", which is reversed
// relative to both the emitted instruction (stmia(base, set)) and ARM
// assembler syntax (STMIA base, {reglist}).
instruct storeL_volatile(indirect mem, iRegL src) %{
  predicate(((StoreLNode*)n)->require_atomic_access());
  match(Set mem (StoreL mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STMIA    $mem,$src\t! long" %}
  ins_encode %{
    // FIXME: why is stmia considered atomic?  Should be strexd
    RegisterSet set($src$$Register);                    // low half of the pair
    set = set | reg_to_register_object($src$$reg + 1);  // high half
    __ stmia(reg_to_register_object($mem$$base), set);
  %}
  ins_pipe(istore_mem_reg);
%}
5517 #endif // !AARCH64
5518 
#ifndef AARCH64
// Alternative volatile long store: move the register pair into FP double
// S14 with FMDRR, then store it with a single FSTD.  S14 is clobbered as
// a scratch register.
instruct storeL_volatile_fp(memoryD mem, iRegL src) %{
  predicate(((StoreLNode*)n)->require_atomic_access());
  match(Set mem (StoreL mem src));
  ins_cost(MEMORY_REF_COST);
  size(8);
  format %{ "FMDRR    S14, $src\t! long \n\t"
            "FSTD     S14, $mem" %}
  ins_encode %{
    __ fmdrr(S14, $src$$Register, $src$$Register->successor());
    __ fstd(S14, $mem$$Address);
  %}
  ins_pipe(istore_mem_reg);
%}
#endif
5534 
#ifdef XXX
// Move SP Pointer
// Disabled (XXX) experiment: copy SP into a general pointer register.
// Never compiled; note the unconditional assert(false) in the encoding.
//instruct movSP(sp_ptr_RegP dst, SPRegP src) %{
//instruct movSP(iRegP dst, SPRegP src) %{
instruct movSP(store_ptr_RegP dst, SPRegP src) %{
  match(Set dst src);
//predicate(!_kids[1]->_leaf->is_Proj() || _kids[1]->_leaf->as_Proj()->_con == TypeFunc::FramePtr);
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "MOV    $dst,$src\t! SP ptr\n\t" %}
  ins_encode %{
    assert(false, "XXX1 got here");
    __ mov($dst$$Register, SP);
    __ mov($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
#endif

#ifdef AARCH64
// FIXME
// Store SP Pointer
// Stores the frame pointer (SP): SP is first copied into tmp, then tmp is
// stored.  The predicate selects only stores of the FramePtr projection.
instruct storeSP(memoryP mem, SPRegP src, iRegP tmp) %{
  match(Set mem (StoreP mem src));
  predicate(_kids[1]->_leaf->is_Proj() && _kids[1]->_leaf->as_Proj()->_con == TypeFunc::FramePtr);
  // Multiple StoreP rules, different only in register mask.
  // Matcher makes the last always valid.  The others will
  // only be valid if they cost less than the last valid
  // rule.  So cost(rule1) < cost(rule2) < cost(last)
  // Unlike immediates, register constraints are not checked
  // at match time.
  ins_cost(MEMORY_REF_COST+DEFAULT_COST+4);
  effect(TEMP tmp);
  size(8);

  format %{ "MOV    $tmp,$src\t! SP ptr\n\t"
            "STR    $tmp,$mem\t! SP ptr" %}
  ins_encode %{
    assert($src$$Register == SP, "SP expected");
    __ mov($tmp$$Register, $src$$Register);
    __ str($tmp$$Register, $mem$$Address);
  %}
  ins_pipe(istore_mem_spORreg); // FIXME
%}
#endif // AARCH64

// Store Pointer

#ifdef AARCH64
// XXX This variant shouldn't be necessary if 6217251 is implemented
// Store pointer to (mem + off); the predicate excludes FramePtr stores,
// which are handled by storeSP above.
instruct storePoff(store_ptr_RegP src, memoryScaledP mem, aimmX off, iRegP tmp) %{
  predicate(!_kids[1]->_leaf->is_Proj() || _kids[1]->_leaf->as_Proj()->_con != TypeFunc::FramePtr);
  match(Set mem (StoreP (AddP mem off) src));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
  effect(TEMP tmp);
  size(4 * 2);

  format %{ "STR    $src,$mem+$off\t! ptr temp=$tmp" %}
  ins_encode %{
    Register base = reg_to_register_object($mem$$base);
    __ add($tmp$$Register, base, $off$$constant);
    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
    __ str($src$$Register, nmem);
  %}
  ins_pipe(istore_mem_reg);
%}
#endif

// Plain pointer store.
instruct storeP(memoryP mem, store_ptr_RegP src) %{
  match(Set mem (StoreP mem src));
#ifdef AARCH64
  predicate(!_kids[1]->_leaf->is_Proj() || _kids[1]->_leaf->as_Proj()->_con != TypeFunc::FramePtr);
#endif
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "STR    $src,$mem\t! ptr" %}
  ins_encode %{
    __ str($src$$Register, $mem$$Address);
  %}
  ins_pipe(istore_mem_spORreg);
%}

#ifdef AARCH64
// Store NULL Pointer
// NULL is stored directly from the zero register; no source register.
instruct storeP0(memoryP mem, immP0 src) %{
  match(Set mem (StoreP mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "STR    ZR,$mem\t! ptr" %}
  ins_encode %{
    __ str(ZR, $mem$$Address);
  %}
  ins_pipe(istore_mem_spORreg);
%}
#endif // AARCH64
5633 
#ifdef _LP64
// Store Compressed Pointer

#ifdef AARCH64
// XXX This variant shouldn't be necessary if 6217251 is implemented
// Store compressed ptr to (mem + off) via base+off materialized in tmp.
instruct storeNoff(store_RegN src, memoryScaledI mem, aimmX off, iRegP tmp) %{
  match(Set mem (StoreN (AddP mem off) src));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
  effect(TEMP tmp);
  size(4 * 2);

  format %{ "str_32 $src,$mem+$off\t! compressed ptr temp=$tmp" %}
  ins_encode %{
    Register base = reg_to_register_object($mem$$base);
    __ add($tmp$$Register, base, $off$$constant);
    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
    __ str_32($src$$Register, nmem);
  %}
  ins_pipe(istore_mem_reg);
%}
#endif

// Store compressed ptr: single 32-bit store.
instruct storeN(memoryI mem, store_RegN src) %{
  match(Set mem (StoreN mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "str_32 $src,$mem\t! compressed ptr" %}
  ins_encode %{
    __ str_32($src$$Register, $mem$$Address);
  %}
  ins_pipe(istore_mem_reg);
%}

#ifdef AARCH64
// Store NULL Pointer
// Compressed NULL stored directly from the zero register.
instruct storeN0(memoryI mem, immN0 src) %{
  match(Set mem (StoreN mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "str_32 ZR,$mem\t! compressed ptr" %}
  ins_encode %{
    __ str_32(ZR, $mem$$Address);
  %}
  ins_pipe(istore_mem_reg);
%}
#endif

// Store Compressed Klass Pointer
instruct storeNKlass(memoryI mem, store_RegN src) %{
  match(Set mem (StoreNKlass mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "str_32 $src,$mem\t! compressed klass ptr" %}
  ins_encode %{
    __ str_32($src$$Register, $mem$$Address);
  %}
  ins_pipe(istore_mem_reg);
%}
#endif

// Store Double

#ifdef AARCH64
// XXX This variant shouldn't be necessary if 6217251 is implemented
// Store double to (mem + off) via base+off materialized in tmp.
instruct storeDoff(regD src, memoryScaledD mem, aimmX off, iRegP tmp) %{
  match(Set mem (StoreD (AddP mem off) src));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
  effect(TEMP tmp);
  size(4 * 2);

  format %{ "STR    $src,$mem+$off\t! double temp=$tmp" %}
  ins_encode %{
    Register base = reg_to_register_object($mem$$base);
    __ add($tmp$$Register, base, $off$$constant);
    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
    __ str_d($src$$FloatRegister, nmem);
  %}
  ins_pipe(fstoreD_mem_reg);
%}
#endif

// Store Double: single FP store.
instruct storeD(memoryD mem, regD src) %{
  match(Set mem (StoreD mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  // FIXME: needs to be atomic, but  ARMv7 A.R.M. guarantees
  // only LDREXD and STREXD are 64-bit single-copy atomic
  format %{ "FSTD   $src,$mem" %}
  ins_encode %{
    __ str_double($src$$FloatRegister, $mem$$Address);
  %}
  ins_pipe(fstoreD_mem_reg);
%}
5731 
#ifdef AARCH64
// Bit-pattern move int -> float register (FMOV, no value conversion).
instruct movI2F(regF dst, iRegI src) %{
  match(Set dst src);
  size(4);

  format %{ "FMOV_sw $dst,$src\t! movI2F" %}
  ins_encode %{
    __ fmov_sw($dst$$FloatRegister, $src$$Register);
  %}
  ins_pipe(ialu_reg); // FIXME
%}

// Bit-pattern move float -> int register (FMOV, no value conversion).
instruct movF2I(iRegI dst, regF src) %{
  match(Set dst src);
  size(4);

  format %{ "FMOV_ws $dst,$src\t! movF2I" %}
  ins_encode %{
    __ fmov_ws($dst$$Register, $src$$FloatRegister);
  %}
  ins_pipe(ialu_reg); // FIXME
%}
#endif

// Store Float

#ifdef AARCH64
// XXX This variant shouldn't be necessary if 6217251 is implemented
// Store float to (mem + off) via base+off materialized in tmp.
instruct storeFoff(regF src, memoryScaledF mem, aimmX off, iRegP tmp) %{
  match(Set mem (StoreF (AddP mem off) src));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
  effect(TEMP tmp);
  size(4 * 2);

  format %{ "str_s  $src,$mem+$off\t! float temp=$tmp" %}
  ins_encode %{
    Register base = reg_to_register_object($mem$$base);
    __ add($tmp$$Register, base, $off$$constant);
    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
    __ str_s($src$$FloatRegister, nmem);
  %}
  ins_pipe(fstoreF_mem_reg);
%}
#endif

// Store Float: single FP store.
instruct storeF( memoryF mem, regF src) %{
  match(Set mem (StoreF mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "FSTS    $src,$mem" %}
  ins_encode %{
    __ str_float($src$$FloatRegister, $mem$$Address);
  %}
  ins_pipe(fstoreF_mem_reg);
%}
5788 
#ifdef AARCH64
// Convert oop pointer into compressed form
// May-be-null encode: condition flags are clobbered (KILL ccr).
instruct encodeHeapOop(iRegN dst, iRegP src, flagsReg ccr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL ccr);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Known-not-null encode: no flags killed.
instruct encodeHeapOop_not_null(iRegN dst, iRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// May-be-null decode: condition flags are clobbered (KILL ccr).
instruct decodeHeapOop(iRegP dst, iRegN src, flagsReg ccr) %{
  predicate(n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  effect(KILL ccr);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    __ decode_heap_oop($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Known-not-null (or constant) decode: no flags killed.
instruct decodeHeapOop_not_null(iRegP dst, iRegN src) %{
  predicate(n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ decode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Compress a klass pointer (never null).
instruct encodeKlass_not_null(iRegN dst, iRegP src) %{
  match(Set dst (EncodePKlass src));
  format %{ "encode_klass_not_null $dst, $src" %}
  ins_encode %{
    __ encode_klass_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow klass pointer (never null).
instruct decodeKlass_not_null(iRegP dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));
  format %{ "decode_klass_not_null $dst, $src" %}
  ins_encode %{
    __ decode_klass_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
#endif // AARCH64
5853 
//----------MemBar Instructions-----------------------------------------------
// Memory barrier flavors

// TODO: take advantage of Aarch64 load-acquire, store-release, etc
// pattern-match out unnecessary membars
// StoreStore-only barrier.
instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(4*MEMORY_REF_COST);

  size(4);
  format %{ "MEMBAR-storestore" %}
  ins_encode %{
    __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore), noreg);
  %}
  ins_pipe(long_memory_op);
%}

// Acquire barrier: LoadLoad | LoadStore.
instruct membar_acquire() %{
  match(MemBarAcquire);
  match(LoadFence);
  ins_cost(4*MEMORY_REF_COST);

  size(4);
  format %{ "MEMBAR-acquire" %}
  ins_encode %{
    __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), noreg);
  %}
  ins_pipe(long_memory_op);
%}

// Lock-acquire membar is free: the CAS in the preceding FastLock already
// provides the ordering, so the encoding is empty.
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(0);

  size(0);
  format %{ "!MEMBAR-acquire (CAS in prior FastLock so empty encoding)" %}
  ins_encode( );
  ins_pipe(empty);
%}

// Release barrier: StoreStore | LoadStore.
instruct membar_release() %{
  match(MemBarRelease);
  match(StoreFence);
  ins_cost(4*MEMORY_REF_COST);

  size(4);
  format %{ "MEMBAR-release" %}
  ins_encode %{
    __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), noreg);
  %}
  ins_pipe(long_memory_op);
%}

// Lock-release membar is free: the CAS in the succeeding FastUnlock
// provides the ordering, so the encoding is empty.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(0);

  size(0);
  format %{ "!MEMBAR-release (CAS in succeeding FastUnlock so empty encoding)" %}
  ins_encode( );
  ins_pipe(empty);
%}

// Volatile membar: StoreLoad barrier.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(4*MEMORY_REF_COST);

  size(4);
  format %{ "MEMBAR-volatile" %}
  ins_encode %{
    __ membar(MacroAssembler::StoreLoad, noreg);
  %}
  ins_pipe(long_memory_op);
%}

// Elided volatile membar: the matcher proved a prior barrier already
// covers it (Matcher::post_store_load_barrier), so nothing is emitted.
instruct unnecessary_membar_volatile() %{
  match(MemBarVolatile);
  predicate(Matcher::post_store_load_barrier(n));
  ins_cost(0);

  size(0);
  format %{ "!MEMBAR-volatile (unnecessary so empty encoding)" %}
  ins_encode( );
  ins_pipe(empty);
%}
5939 
5940 //----------Register Move Instructions-----------------------------------------
5941 // instruct roundDouble_nop(regD dst) %{
5942 //   match(Set dst (RoundDouble dst));
5943 //   ins_pipe(empty);
5944 // %}
5945 
5946 
5947 // instruct roundFloat_nop(regF dst) %{
5948 //   match(Set dst (RoundFloat dst));
5949 //   ins_pipe(empty);
5950 // %}
5951 
5952 
5953 #ifdef AARCH64
// 0 constant in register.
// A zero constant is "materialized" by simply naming the always-zero
// register (ZRRegI operand class), so no instruction is emitted (size 0).
instruct zrImmI0(ZRRegI dst, immI0 imm) %{
  match(Set dst imm);
  size(0);
  ins_cost(0);

  format %{ "! ZR (int 0)" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe(ialu_none);
%}

// 0 constant in register — long variant, same zero-register trick as above.
instruct zrImmL0(ZRRegL dst, immL0 imm) %{
  match(Set dst imm);
  size(0);
  ins_cost(0);

  format %{ "! ZR (long 0)" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe(ialu_none);
%}

// NOTE(review): the two rules below are compiled out — XXX is not defined
// anywhere visible here, so ADLC never sees them.
#ifdef XXX
// 0 constant in register
instruct zrImmN0(ZRRegN dst, immN0 imm) %{
  match(Set dst imm);
  size(0);
  ins_cost(0);

  format %{ "! ZR (compressed pointer NULL)" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe(ialu_none);
%}

// 0 constant in register
instruct zrImmP0(ZRRegP dst, immP0 imm) %{
  match(Set dst imm);
  size(0);
  ins_cost(0);

  format %{ "! ZR (NULL)" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe(ialu_none);
%}
5998 #endif
5999 #endif // AARCH64
6000 
6001 // Cast Index to Pointer for unsafe natives
// Cast Index to Pointer for unsafe natives.
// Pure re-typing: only emits a MOV when the register allocator put source
// and destination in different registers; otherwise it is free.
instruct castX2P(iRegX src, iRegP dst) %{
  match(Set dst (CastX2P src));

  format %{ "MOV    $dst,$src\t! IntX->Ptr if $dst != $src" %}
  ins_encode %{
    if ($dst$$Register !=  $src$$Register) {
      __ mov($dst$$Register, $src$$Register);
    }
  %}
  ins_pipe(ialu_reg);
%}
6013 
6014 // Cast Pointer to Index for unsafe natives
// Cast Pointer to Index for unsafe natives.
// Mirror of castX2P above: a MOV is emitted only if dst and src ended up
// in different registers.
instruct castP2X(iRegP src, iRegX dst) %{
  match(Set dst (CastP2X src));

  format %{ "MOV    $dst,$src\t! Ptr->IntX if $dst != $src" %}
  ins_encode %{
    if ($dst$$Register !=  $src$$Register) {
      __ mov($dst$$Register, $src$$Register);
    }
  %}
  ins_pipe(ialu_reg);
%}
6026 
6027 #ifndef AARCH64
6028 //----------Conditional Move---------------------------------------------------
6029 // Conditional move
// ARM32 (two-address) conditional int move on pointer-compare flags:
// a predicated MOV overwrites $dst with $src only when $cmp holds.
instruct cmovIP_reg(cmpOpP cmp, flagsRegP pcc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp  $dst,$src\t! int" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}
6040 #endif
6041 
6042 #ifdef AARCH64
// AArch64 three-operand conditional selects.  CSEL Rd,Rn,Rm,cond writes Rn
// when the condition holds and Rm otherwise; note the match pattern lists
// the operands swapped as (Binary src2 src1), so $src1 is the value taken
// when $cmp is true.  The variants below differ only in value type
// (int/long/ptr/narrow ptr) and flags flavor: signed (cmpOp/flagsReg),
// pointer compare (cmpOpP/flagsRegP), unsigned (cmpOpU/flagsRegU) and
// compare-against-zero (cmpOp0/flagsReg_EQNELTGE).
instruct cmovI_reg3(cmpOp cmp, flagsReg icc, iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary src2 src1)));
  ins_cost(150);
  size(4);
  format %{ "CSEL $dst,$src1,$src2,$cmp\t! int" %}
  ins_encode %{
    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

instruct cmovL_reg3(cmpOp cmp, flagsReg icc, iRegL dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp icc) (Binary src2 src1)));
  ins_cost(150);
  size(4);
  format %{ "CSEL $dst,$src1,$src2,$cmp\t! long" %}
  ins_encode %{
    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

instruct cmovP_reg3(cmpOp cmp, flagsReg icc, iRegP dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp icc) (Binary src2 src1)));
  ins_cost(150);
  size(4);
  format %{ "CSEL $dst,$src1,$src2,$cmp\t! ptr" %}
  ins_encode %{
    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

instruct cmovN_reg3(cmpOp cmp, flagsReg icc, iRegN dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp icc) (Binary src2 src1)));
  ins_cost(150);
  size(4);
  format %{ "CSEL $dst,$src1,$src2,$cmp\t! compressed ptr" %}
  ins_encode %{
    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

// Pointer-compare flags (cmpOpP/flagsRegP) variants.
instruct cmovIP_reg3(cmpOpP cmp, flagsRegP icc, iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary src2 src1)));
  ins_cost(150);
  size(4);
  format %{ "CSEL $dst,$src1,$src2,$cmp\t! int" %}
  ins_encode %{
    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

instruct cmovLP_reg3(cmpOpP cmp, flagsRegP icc, iRegL dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp icc) (Binary src2 src1)));
  ins_cost(150);
  size(4);
  format %{ "CSEL $dst,$src1,$src2,$cmp\t! long" %}
  ins_encode %{
    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

instruct cmovPP_reg3(cmpOpP cmp, flagsRegP icc, iRegP dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp icc) (Binary src2 src1)));
  ins_cost(150);
  size(4);
  format %{ "CSEL $dst,$src1,$src2,$cmp\t! ptr" %}
  ins_encode %{
    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

instruct cmovNP_reg3(cmpOpP cmp, flagsRegP icc, iRegN dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp icc) (Binary src2 src1)));
  ins_cost(150);
  size(4);
  format %{ "CSEL $dst,$src1,$src2,$cmp\t! compressed ptr" %}
  ins_encode %{
    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

// Unsigned-compare flags (cmpOpU/flagsRegU) variants.
instruct cmovIU_reg3(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary src2 src1)));
  ins_cost(150);
  size(4);
  format %{ "CSEL $dst,$src1,$src2,$cmp\t! int" %}
  ins_encode %{
    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

instruct cmovLU_reg3(cmpOpU cmp, flagsRegU icc, iRegL dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp icc) (Binary src2 src1)));
  ins_cost(150);
  size(4);
  format %{ "CSEL $dst,$src1,$src2,$cmp\t! long" %}
  ins_encode %{
    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

instruct cmovPU_reg3(cmpOpU cmp, flagsRegU icc, iRegP dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp icc) (Binary src2 src1)));
  ins_cost(150);
  size(4);
  format %{ "CSEL $dst,$src1,$src2,$cmp\t! ptr" %}
  ins_encode %{
    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

instruct cmovNU_reg3(cmpOpU cmp, flagsRegU icc, iRegN dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp icc) (Binary src2 src1)));
  ins_cost(150);
  size(4);
  format %{ "CSEL $dst,$src1,$src2,$cmp\t! compressed ptr" %}
  ins_encode %{
    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

// Compare-with-zero flags (cmpOp0/flagsReg_EQNELTGE) variants.
instruct cmovIZ_reg3(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary src2 src1)));
  ins_cost(150);
  size(4);
  format %{ "CSEL $dst,$src1,$src2,$cmp\t! int" %}
  ins_encode %{
    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

instruct cmovLZ_reg3(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegL dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp icc) (Binary src2 src1)));
  ins_cost(150);
  size(4);
  format %{ "CSEL $dst,$src1,$src2,$cmp\t! long" %}
  ins_encode %{
    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

instruct cmovPZ_reg3(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegP dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp icc) (Binary src2 src1)));
  ins_cost(150);
  size(4);
  format %{ "CSEL $dst,$src1,$src2,$cmp\t! ptr" %}
  ins_encode %{
    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

instruct cmovNZ_reg3(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegN dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp icc) (Binary src2 src1)));
  ins_cost(150);
  size(4);
  format %{ "CSEL $dst,$src1,$src2,$cmp\t! compressed ptr" %}
  ins_encode %{
    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}
6218 #endif // AARCH64
6219 
6220 #ifndef AARCH64
// ARM32 conditional int move of an immediate on pointer-compare flags.
// immIMov presumably restricts to MOV-encodable immediates — TODO confirm
// against the operand definition earlier in this file.
instruct cmovIP_immMov(cmpOpP cmp, flagsRegP pcc, iRegI dst, immIMov src) %{
  match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src)));
  ins_cost(140);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}

// Same as above but for 16-bit immediates, encoded with MOVW.
instruct cmovIP_imm16(cmpOpP cmp, flagsRegP pcc, iRegI dst, immI16 src) %{
  match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src)));
  ins_cost(140);
  size(4);
  format %{ "MOVw$cmp  $dst,$src" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
6242 #endif
6243 
// Two-address conditional int move on signed-compare flags: predicated MOV
// replaces $dst with $src only when $cmp holds.
instruct cmovI_reg(cmpOp cmp, flagsReg icc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}
6254 
6255 #ifdef AARCH64
// AArch64 two-address conditional long move (single full-width register,
// so one MOV suffices — contrast with the two-MOV ARM32 long rules below).
instruct cmovL_reg(cmpOp cmp, flagsReg icc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp  $dst,$src\t! long" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}
6266 #endif
6267 
6268 #ifndef AARCH64
// Conditional int move of a MOV-encodable immediate on signed-compare flags.
instruct cmovI_immMov(cmpOp cmp, flagsReg icc, iRegI dst, immIMov src) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
  ins_cost(140);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}

// 16-bit-immediate variant, encoded with MOVW.
// NOTE(review): the format prints "MOVw" while the sibling rules at
// cmovII_imm16_EQNELTGE/cmovIIu_imm16 print "MOVW" — cosmetic only.
instruct cmovII_imm16(cmpOp cmp, flagsReg icc, iRegI dst, immI16 src) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
  ins_cost(140);
  size(4);
  format %{ "MOVw$cmp  $dst,$src" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
6290 #endif
6291 
// Conditional int move on compare-with-zero flags.  The predicate restricts
// matching to the EQ/NE/LT/GE tests — the only conditions this flags
// register flavor (flagsReg_EQNELTGE) supports.
instruct cmovII_reg_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}
6306 
6307 #ifndef AARCH64
// Immediate variants of the zero-compare conditional move; the predicate
// again limits matching to the EQ/NE/LT/GE tests supported by
// flagsReg_EQNELTGE.
instruct cmovII_immMov_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegI dst, immIMov src) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
  ins_cost(140);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}

// 16-bit-immediate variant, encoded with MOVW.
instruct cmovII_imm16_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegI dst, immI16 src) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
  ins_cost(140);
  size(4);
  format %{ "MOVW$cmp  $dst,$src" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
6337 #endif
6338 
// Conditional int move on unsigned-compare flags.
instruct cmovIIu_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}
6349 
6350 #ifndef AARCH64
// Immediate variants of the unsigned-compare conditional int move.
instruct cmovIIu_immMov(cmpOpU cmp, flagsRegU icc, iRegI dst, immIMov src) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
  ins_cost(140);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}

// 16-bit-immediate variant, encoded with MOVW.
instruct cmovIIu_imm16(cmpOpU cmp, flagsRegU icc, iRegI dst, immI16 src) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
  ins_cost(140);
  size(4);
  format %{ "MOVW$cmp  $dst,$src" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
6372 #endif
6373 
// Conditional move of a pointer on pointer-compare flags.
instruct cmovPP_reg(cmpOpP cmp, flagsRegP pcc, iRegP dst, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

// Conditional move of the NULL pointer constant (immP0).  On AArch64 the
// zero register is moved; on ARM32 the literal constant (0) is moved.
instruct cmovPP_imm(cmpOpP cmp, flagsRegP pcc, iRegP dst, immP0 src) %{
  match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src)));
  ins_cost(140);
  size(4);
#ifdef AARCH64
  format %{ "MOV$cmp  $dst,ZR" %}
#else
  format %{ "MOV$cmp  $dst,$src" %}
#endif
  ins_encode %{
#ifdef AARCH64
    __ mov($dst$$Register,             ZR, (AsmCondition)($cmp$$cmpcode));
#else
    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
#endif
  %}
  ins_pipe(ialu_imm);
%}
6404 
// This instruction also works with CmpN so we don't need cmovPN_reg.
// Conditional pointer move on signed-compare flags.
instruct cmovPI_reg(cmpOp cmp, flagsReg icc, iRegP dst, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "MOV$cmp  $dst,$src\t! ptr" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

// Zero-compare flavor; predicate limits matching to the EQ/NE/LT/GE tests
// supported by flagsReg_EQNELTGE.
instruct cmovPI_reg_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegP dst, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
  ins_cost(150);

  size(4);
  format %{ "MOV$cmp  $dst,$src\t! ptr" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

// Unsigned-compare flavor.
instruct cmovPIu_reg(cmpOpU cmp, flagsRegU icc, iRegP dst, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "MOV$cmp  $dst,$src\t! ptr" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}
6445 
// Conditional move of NULL (immP0) into a pointer register; AArch64 moves
// the zero register, ARM32 moves the literal constant 0.
instruct cmovPI_imm(cmpOp cmp, flagsReg icc, iRegP dst, immP0 src) %{
  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
  ins_cost(140);

  size(4);
#ifdef AARCH64
  format %{ "MOV$cmp  $dst,ZR\t! ptr" %}
#else
  format %{ "MOV$cmp  $dst,$src\t! ptr" %}
#endif
  ins_encode %{
#ifdef AARCH64
    __ mov($dst$$Register,             ZR, (AsmCondition)($cmp$$cmpcode));
#else
    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
#endif
  %}
  ins_pipe(ialu_imm);
%}

// Zero-compare flavor; predicate limits matching to EQ/NE/LT/GE.
instruct cmovPI_imm_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegP dst, immP0 src) %{
  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
  ins_cost(140);

  size(4);
#ifdef AARCH64
  format %{ "MOV$cmp  $dst,ZR\t! ptr" %}
#else
  format %{ "MOV$cmp  $dst,$src\t! ptr" %}
#endif
  ins_encode %{
#ifdef AARCH64
    __ mov($dst$$Register,             ZR, (AsmCondition)($cmp$$cmpcode));
#else
    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
#endif
  %}
  ins_pipe(ialu_imm);
%}

// Unsigned-compare flavor.
instruct cmovPIu_imm(cmpOpU cmp, flagsRegU icc, iRegP dst, immP0 src) %{
  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
  ins_cost(140);

  size(4);
#ifdef AARCH64
  format %{ "MOV$cmp  $dst,ZR\t! ptr" %}
#else
  format %{ "MOV$cmp  $dst,$src\t! ptr" %}
#endif
  ins_encode %{
#ifdef AARCH64
    __ mov($dst$$Register,             ZR, (AsmCondition)($cmp$$cmpcode));
#else
    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
#endif
  %}
  ins_pipe(ialu_imm);
%}
6509 
6510 #ifdef AARCH64
// Conditional move — AArch64 floating-point conditional selects.
// FCSEL Sd/Dd,Sn,Sm,cond selects the first source when the condition holds,
// the second otherwise; as with the integer CSEL rules above, the match
// pattern lists (Binary src2 src1), so $src1 is taken when $cmp is true.
// Variants differ only in precision (float/double) and flags flavor.
instruct cmovF_reg(cmpOp cmp, flagsReg icc, regF dst, regF src1, regF src2) %{
  match(Set dst (CMoveF (Binary cmp icc) (Binary src2 src1)));
  ins_cost(150);
  size(4);
  format %{ "FCSEL_s $dst,$src1,$src2,$cmp" %}
  ins_encode %{
    __ fcsel_s($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_float_move);
%}

instruct cmovD_reg(cmpOp cmp, flagsReg icc, regD dst, regD src1, regD src2) %{
  match(Set dst (CMoveD (Binary cmp icc) (Binary src2 src1)));
  ins_cost(150);
  size(4);
  format %{ "FCSEL_d $dst,$src1,$src2,$cmp" %}
  ins_encode %{
    __ fcsel_d($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_float_move);
%}

instruct cmovFP_reg(cmpOpP cmp, flagsRegP icc, regF dst, regF src1, regF src2) %{
  match(Set dst (CMoveF (Binary cmp icc) (Binary src2 src1)));
  ins_cost(150);
  size(4);
  format %{ "FCSEL_s $dst,$src1,$src2,$cmp" %}
  ins_encode %{
    __ fcsel_s($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_float_move);
%}

instruct cmovDP_reg(cmpOpP cmp, flagsRegP icc, regD dst, regD src1, regD src2) %{
  match(Set dst (CMoveD (Binary cmp icc) (Binary src2 src1)));
  ins_cost(150);
  size(4);
  format %{ "FCSEL_d $dst,$src1,$src2,$cmp" %}
  ins_encode %{
    __ fcsel_d($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_float_move);
%}

instruct cmovFU_reg(cmpOpU cmp, flagsRegU icc, regF dst, regF src1, regF src2) %{
  match(Set dst (CMoveF (Binary cmp icc) (Binary src2 src1)));
  ins_cost(150);
  size(4);
  format %{ "FCSEL_s $dst,$src1,$src2,$cmp" %}
  ins_encode %{
    __ fcsel_s($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_float_move);
%}

instruct cmovDU_reg(cmpOpU cmp, flagsRegU icc, regD dst, regD src1, regD src2) %{
  match(Set dst (CMoveD (Binary cmp icc) (Binary src2 src1)));
  ins_cost(150);
  size(4);
  format %{ "FCSEL_d $dst,$src1,$src2,$cmp" %}
  ins_encode %{
    __ fcsel_d($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_float_move);
%}

instruct cmovFZ_reg(cmpOp0 cmp, flagsReg_EQNELTGE icc, regF dst, regF src1, regF src2) %{
  match(Set dst (CMoveF (Binary cmp icc) (Binary src2 src1)));
  ins_cost(150);
  size(4);
  format %{ "FCSEL_s $dst,$src1,$src2,$cmp" %}
  ins_encode %{
    __ fcsel_s($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_float_move);
%}

instruct cmovDZ_reg(cmpOp0 cmp, flagsReg_EQNELTGE icc, regD dst, regD src1, regD src2) %{
  match(Set dst (CMoveD (Binary cmp icc) (Binary src2 src1)));
  ins_cost(150);
  size(4);
  format %{ "FCSEL_d $dst,$src1,$src2,$cmp" %}
  ins_encode %{
    __ fcsel_d($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_float_move);
%}
6599 
6600 #else // !AARCH64
6601 
// Conditional move — ARM32 single-precision conditional copies using a
// predicated FCPYS, one variant per flags flavor.  Two-address form: $dst
// is overwritten with $src only when $cmp holds.
instruct cmovFP_reg(cmpOpP cmp, flagsRegP pcc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp pcc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "FCPYS$cmp $dst,$src" %}
  ins_encode %{
    __ fcpys($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_float_move);
%}

instruct cmovFI_reg(cmpOp cmp, flagsReg icc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "FCPYS$cmp $dst,$src" %}
  ins_encode %{
    __ fcpys($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_float_move);
%}

// Zero-compare flavor; predicate limits matching to the EQ/NE/LT/GE tests
// supported by flagsReg_EQNELTGE.
instruct cmovFI_reg_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
  ins_cost(150);

  size(4);
  format %{ "FCPYS$cmp $dst,$src" %}
  ins_encode %{
    __ fcpys($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_float_move);
%}

instruct cmovFIu_reg(cmpOpU cmp, flagsRegU icc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "FCPYS$cmp $dst,$src" %}
  ins_encode %{
    __ fcpys($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_float_move);
%}
6653 
// Conditional move — ARM32 double-precision conditional copies using a
// predicated FCPYD; same structure as the FCPYS rules above.
instruct cmovDP_reg(cmpOpP cmp, flagsRegP pcc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp pcc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "FCPYD$cmp $dst,$src" %}
  ins_encode %{
    __ fcpyd($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_double_move);
%}

instruct cmovDI_reg(cmpOp cmp, flagsReg icc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "FCPYD$cmp $dst,$src" %}
  ins_encode %{
    __ fcpyd($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_double_move);
%}

// Zero-compare flavor; predicate limits matching to EQ/NE/LT/GE.
instruct cmovDI_reg_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
  ins_cost(150);

  size(4);
  format %{ "FCPYD$cmp $dst,$src" %}
  ins_encode %{
    __ fcpyd($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_double_move);
%}

instruct cmovDIu_reg(cmpOpU cmp, flagsRegU icc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "FCPYD$cmp $dst,$src" %}
  ins_encode %{
    __ fcpyd($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_double_move);
%}
6702 
// Conditional move — ARM32 long conditional moves on pointer-compare flags.
// A long occupies a register pair here, so two predicated MOVs are emitted:
// one for the low word ($dst) and one for the high word (its successor()).
instruct cmovLP_reg(cmpOpP cmp, flagsRegP pcc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src)));
  ins_cost(150);

  size(8);
  format %{ "MOV$cmp  $dst.lo,$src.lo\t! long\n\t"
            "MOV$cmp  $dst.hi,$src.hi" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
    __ mov($dst$$Register->successor(), $src$$Register->successor(), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

// TODO: try immLRot2 instead, (0, $con$$constant) becomes
// (hi($con$$constant), lo($con$$constant)) becomes
// Rotate-encodable low-word constant; the high word is zeroed.
instruct cmovLP_immRot(cmpOpP cmp, flagsRegP pcc, iRegL dst, immLlowRot src) %{
  match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src)));
  ins_cost(140);

  size(8);
  format %{ "MOV$cmp  $dst.lo,$src\t! long\n\t"
            "MOV$cmp  $dst.hi,0" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
    __ mov($dst$$Register->successor(), 0, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}

// 16-bit constant: MOVW the low word, zero the high word.
instruct cmovLP_imm16(cmpOpP cmp, flagsRegP pcc, iRegL dst, immL16 src) %{
  match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src)));
  ins_cost(140);

  size(8);
  format %{ "MOV$cmp  $dst.lo,$src\t! long\n\t"
            "MOV$cmp  $dst.hi,0" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
    __ mov($dst$$Register->successor(), 0, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
6747 
// ARM32 long conditional move on signed-compare flags: two predicated MOVs,
// one per word of the register pair.
instruct cmovLI_reg(cmpOp cmp, flagsReg icc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(8);
  format %{ "MOV$cmp  $dst.lo,$src.lo\t! long\n\t"
            "MOV$cmp  $dst.hi,$src.hi" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
    __ mov($dst$$Register->successor(), $src$$Register->successor(), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

// Zero-compare flavor; predicate limits matching to the EQ/NE/LT/GE tests
// supported by flagsReg_EQNELTGE.
instruct cmovLI_reg_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
  ins_cost(150);

  size(8);
  format %{ "MOV$cmp  $dst.lo,$src.lo\t! long\n\t"
            "MOV$cmp  $dst.hi,$src.hi" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
    __ mov($dst$$Register->successor(), $src$$Register->successor(), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}
6779 
6780 // TODO: try immLRot2 instead, (0, $con$$constant) becomes
6781 // (hi($con$$constant), lo($con$$constant)) becomes
// Conditional move of a rotate-encodable long immediate: low word gets the
// constant, high word is zeroed; both MOVs execute only when $cmp holds.
instruct cmovLI_immRot(cmpOp cmp, flagsReg icc, iRegL dst, immLlowRot src) %{
  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
  ins_cost(140);

  size(8);
  format %{ "MOV$cmp  $dst.lo,$src\t! long\n\t"
            "MOV$cmp  $dst.hi,0" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
    // High half of the long is always zero for an immLlowRot constant.
    __ mov($dst$$Register->successor(), 0, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
6795 
// TODO: try immLRot2 instead, so that (0, $con$$constant) becomes
// (hi($con$$constant), lo($con$$constant))
// Conditional move of a rotate-encodable long immediate, restricted-flags
// variant. NOTE(review): predicate limits matching to eq/ne/lt/ge, presumably
// the only conditions flagsReg_EQNELTGE provides — confirm.
instruct cmovLI_immRot_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegL dst, immLlowRot src) %{
  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
  ins_cost(140);

  size(8);
  format %{ "MOV$cmp  $dst.lo,$src\t! long\n\t"
            "MOV$cmp  $dst.hi,0" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
    // High half of the long is always zero for an immLlowRot constant.
    __ mov($dst$$Register->successor(), 0, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
6815 
// Conditional move of a 16-bit long immediate using MOVW (16-bit immediate
// move): low word gets the constant, high word is zeroed.
instruct cmovLI_imm16(cmpOp cmp, flagsReg icc, iRegL dst, immL16 src) %{
  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
  ins_cost(140);

  size(8);
  format %{ "MOV$cmp  $dst.lo,$src\t! long\n\t"
            "MOV$cmp  $dst.hi,0" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
    // High half of the long is always zero for an immL16 constant.
    __ movw($dst$$Register->successor(), 0, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
6829 
// Conditional move of a 16-bit long immediate, restricted-flags variant.
// NOTE(review): predicate limits matching to eq/ne/lt/ge, presumably the only
// conditions flagsReg_EQNELTGE provides — confirm.
instruct cmovLI_imm16_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegL dst, immL16 src) %{
  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
  ins_cost(140);

  size(8);
  format %{ "MOV$cmp  $dst.lo,$src\t! long\n\t"
            "MOV$cmp  $dst.hi,0" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
    // High half of the long is always zero for an immL16 constant.
    __ movw($dst$$Register->successor(), 0, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
6847 
// Conditional move of a long register pair for unsigned comparisons
// (cmpOpU/flagsRegU); same two-MOV encoding as cmovLI_reg.
instruct cmovLIu_reg(cmpOpU cmp, flagsRegU icc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(8);
  format %{ "MOV$cmp  $dst.lo,$src.lo\t! long\n\t"
            "MOV$cmp  $dst.hi,$src.hi" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
    // successor() is the adjacent register holding the high 32 bits of the long.
    __ mov($dst$$Register->successor(), $src$$Register->successor(), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}
6861 #endif // !AARCH64
6862 
6863 
6864 //----------OS and Locking Instructions----------------------------------------
6865 
6866 // This name is KNOWN by the ADLC and cannot be changed.
6867 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
6868 // for this guy.
// Load of the current thread pointer. Zero-size/zero-cost: the thread is
// pinned in the Rthread register, so no code is emitted — the operand class
// (RthreadRegP) does all the work.
instruct tlsLoadP(RthreadRegP dst) %{
  match(Set dst (ThreadLocal));

  size(0);
  ins_cost(0);
  format %{ "! TLS is in $dst" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe(ialu_none);
%}
6878 
// CheckCastPP is a compile-time type assertion only; no code is emitted.
instruct checkCastPP( iRegP dst ) %{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "! checkcastPP of $dst" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe(empty);
%}
6887 
6888 
// CastPP is a compiler bookkeeping node; no code is emitted.
instruct castPP( iRegP dst ) %{
  match(Set dst (CastPP dst));
  format %{ "! castPP of $dst" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe(empty);
%}
6895 
// CastII is a compiler bookkeeping node; no code is emitted.
instruct castII( iRegI dst ) %{
  match(Set dst (CastII dst));
  format %{ "! castII of $dst" %}
  ins_encode( /*empty encoding*/ );
  ins_cost(0);
  ins_pipe(empty);
%}
6903 
6904 //----------Arithmetic Instructions--------------------------------------------
6905 // Addition Instructions
6906 // Register Addition
// Integer register-register addition: dst = src1 + src2.
instruct addI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (AddI src1 src2));

  size(4);
  format %{ "add_32 $dst,$src1,$src2\t! int" %}
  ins_encode %{
    __ add_32($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
6917 
6918 #ifndef AARCH64
// Fused shift-and-add: dst = src3 + (src1 << src2), using the ARM flexible
// second operand (register-specified LSL) to fold the shift into one ADD.
instruct addshlI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (AddI (LShiftI src1 src2) src3));

  size(4);
  format %{ "add_32 $dst,$src3,$src1<<$src2\t! int" %}
  ins_encode %{
    __ add_32($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, lsl, $src2$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}
6929 #endif
6930 
6931 #ifdef AARCH64
6932 #ifdef TODO
// (Disabled: inside #ifdef TODO.) Fused long shift-and-add for AArch64:
// dst = src3 + (src1 << src2) with an immediate shift amount.
instruct addshlL_reg_imm_reg(iRegL dst, iRegL src1, immU6 src2, iRegL src3) %{
  match(Set dst (AddL (LShiftL src1 src2) src3));

  size(4);
  format %{ "ADD    $dst,$src3,$src1<<$src2\t! long" %}
  ins_encode %{
    __ add($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, lsl, $src2$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}
6943 #endif
6944 #endif
6945 
// Fused shift-and-add with immediate shift: dst = src3 + (src1 << src2).
instruct addshlI_reg_imm_reg(iRegI dst, iRegI src1, immU5 src2, iRegI src3) %{
  match(Set dst (AddI (LShiftI src1 src2) src3));

  size(4);
  format %{ "add_32 $dst,$src3,$src1<<$src2\t! int" %}
  ins_encode %{
    __ add_32($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, lsl, $src2$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}
6956 
6957 #ifndef AARCH64
// Fused arithmetic-shift-right and add: dst = src3 + (src1 >> src2),
// register-specified ASR folded into the ADD's flexible operand.
instruct addsarI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (AddI (RShiftI src1 src2) src3));

  size(4);
  format %{ "add_32 $dst,$src3,$src1>>$src2\t! int" %}
  ins_encode %{
    __ add_32($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, asr, $src2$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}
6968 #endif
6969 
// Fused arithmetic-shift-right (immediate) and add: dst = src3 + (src1 >> src2).
instruct addsarI_reg_imm_reg(iRegI dst, iRegI src1, immU5 src2, iRegI src3) %{
  match(Set dst (AddI (RShiftI src1 src2) src3));

  size(4);
  format %{ "add_32 $dst,$src3,$src1>>$src2\t! int" %}
  ins_encode %{
    __ add_32($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, asr, $src2$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}
6980 
6981 #ifndef AARCH64
// Fused logical-shift-right and add: dst = src3 + (src1 >>> src2),
// register-specified LSR folded into the ADD's flexible operand.
instruct addshrI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (AddI (URShiftI src1 src2) src3));

  size(4);
  format %{ "add_32 $dst,$src3,$src1>>>$src2\t! int" %}
  ins_encode %{
    __ add_32($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, lsr, $src2$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}
6992 #endif
6993 
// Fused logical-shift-right (immediate) and add: dst = src3 + (src1 >>> src2).
instruct addshrI_reg_imm_reg(iRegI dst, iRegI src1, immU5 src2, iRegI src3) %{
  match(Set dst (AddI (URShiftI src1 src2) src3));

  size(4);
  format %{ "add_32 $dst,$src3,$src1>>>$src2\t! int" %}
  ins_encode %{
    __ add_32($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, lsr, $src2$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}
7004 
7005 // Immediate Addition
// Integer addition with an arithmetic-encodable immediate: dst = src1 + src2.
instruct addI_reg_aimmI(iRegI dst, iRegI src1, aimmI src2) %{
  match(Set dst (AddI src1 src2));

  size(4);
  format %{ "add_32 $dst,$src1,$src2\t! int" %}
  ins_encode %{
    __ add_32($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(ialu_reg_imm);
%}
7016 
7017 // Pointer Register Addition
// Pointer addition: dst = src1 + src2 (src2 is a machine-word offset).
instruct addP_reg_reg(iRegP dst, iRegP src1, iRegX src2) %{
  match(Set dst (AddP src1 src2));

  size(4);
  format %{ "ADD    $dst,$src1,$src2\t! ptr" %}
  ins_encode %{
    __ add($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
7028 
7029 #ifdef AARCH64
7030 // unshifted I2L operand
// Operand matching a bare ConvI2L (int widened to long, no shift).
// Uses MEMORY_INTER as a carrier so the encoder can recover the source
// register via $$base and the (zero) shift via $$scale; index 0xff appears to
// mean "no index register" — TODO confirm against other memory operands.
operand unshiftedI2L(iRegI src2) %{
//constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(ConvI2L src2);

  op_cost(1);
  format %{ "$src2.w" %}
  interface(MEMORY_INTER) %{
    base($src2);
    index(0xff);
    scale(0x0);
    disp(0x0);
  %}
%}
7044 
7045 // shifted I2L operand
// Operand matching (ConvI2L src2) << src3 with a small (0..4) shift.
// MEMORY_INTER carries the register in $$base and the shift amount in $$scale
// for the consuming instruct's encoder.
operand shiftedI2L(iRegI src2, immI_0_4 src3) %{
//constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(LShiftX (ConvI2L src2) src3);

  op_cost(1);
  format %{ "$src2.w << $src3" %}
  interface(MEMORY_INTER) %{
    base($src2);
    index(0xff);
    scale($src3);
    disp(0x0);
  %}
%}
7059 
// Operand class accepting either a shifted or an unshifted ConvI2L operand.
opclass shiftedRegI(shiftedI2L, unshiftedI2L);
7061 
7062 instruct shlL_reg_regI(iRegL dst, iRegI src1, immU6 src2) %{
7063   match(Set dst (LShiftL (ConvI2L src1) src2));
7064 
7065   size(4);
7066   format %{ "LSL    $dst,$src1.w,$src2\t! ptr" %}
7067   ins_encode %{
7068     int c = $src2$$constant;
7069     int r = 64 - c;
7070     int s = 31;
7071     if (s >= r) {
7072       s = r - 1;
7073     }
7074     __ sbfm($dst$$Register, $src1$$Register, r, s);
7075   %}
7076   ins_pipe(ialu_reg_reg);
7077 %}
7078 
// Pointer plus (possibly shifted) sign-extended int:
// dst = src1 + (sxtw(src2.base) << src2.scale), one extended-register ADD.
instruct addP_reg_regI(iRegP dst, iRegP src1, shiftedRegI src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(DEFAULT_COST * 3/2);
  size(4);
  format %{ "ADD    $dst,$src1,$src2, sxtw\t! ptr" %}
  ins_encode %{
    // The shiftedRegI operand stores its register in $$base and shift in $$scale.
    Register base = reg_to_register_object($src2$$base);
    __ add($dst$$Register, $src1$$Register, base, ex_sxtw, $src2$$scale);
  %}
  ins_pipe(ialu_reg_reg);
%}
7091 #endif
7092 
7093 // shifted iRegX operand
// Operand matching (src2 << src3) on a machine-word register; MEMORY_INTER
// carries the register in $$base and the shift amount in $$scale.
operand shiftedX(iRegX src2, shimmX src3) %{
//constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(LShiftX src2 src3);

  op_cost(1);
  format %{ "$src2 << $src3" %}
  interface(MEMORY_INTER) %{
    base($src2);
    index(0xff);
    scale($src3);
    disp(0x0);
  %}
%}
7107 
// Pointer plus shifted offset: dst = src1 + (src2.base << src2.scale),
// with the LSL folded into the ADD's shifted-register operand.
instruct addshlP_reg_reg_imm(iRegP dst, iRegP src1, shiftedX src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(DEFAULT_COST * 3/2);
  size(4);
  format %{ "ADD    $dst,$src1,$src2\t! ptr" %}
  ins_encode %{
    // The shiftedX operand stores its register in $$base and shift in $$scale.
    Register base = reg_to_register_object($src2$$base);
    __ add($dst$$Register, $src1$$Register, AsmOperand(base, lsl, $src2$$scale));
  %}
  ins_pipe(ialu_reg_reg);
%}
7120 
7121 // Pointer Immediate Addition
// Pointer addition with an arithmetic-encodable immediate: dst = src1 + src2.
instruct addP_reg_aimmX(iRegP dst, iRegP src1, aimmX src2) %{
  match(Set dst (AddP src1 src2));

  size(4);
  format %{ "ADD    $dst,$src1,$src2\t! ptr" %}
  ins_encode %{
    __ add($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(ialu_reg_imm);
%}
7132 
7133 // Long Addition
7134 #ifdef AARCH64
// Long register-register addition (AArch64): one 64-bit ADD.
instruct addL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
  match(Set dst (AddL src1 src2));
  size(4);
  format %{ "ADD     $dst,$src1,$src2\t! long" %}
  ins_encode %{
    __ add($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
7144 
// Long plus (possibly shifted) sign-extended int (AArch64):
// dst = src1 + (sxtw(src2.base) << src2.scale), one extended-register ADD.
instruct addL_reg_regI(iRegL dst, iRegL src1, shiftedRegI src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(DEFAULT_COST * 3/2);
  size(4);
  format %{ "ADD    $dst,$src1,$src2, sxtw\t! long" %}
  ins_encode %{
    // The shiftedRegI operand stores its register in $$base and shift in $$scale.
    Register base = reg_to_register_object($src2$$base);
    __ add($dst$$Register, $src1$$Register, base, ex_sxtw, $src2$$scale);
  %}
  ins_pipe(ialu_reg_reg);
%}
7157 #else
// Long addition on 32-bit ARM: ADDS on the low words produces the carry that
// ADC consumes for the high words; the flags register is clobbered (KILL ccr).
instruct addL_reg_reg(iRegL dst, iRegL src1, iRegL src2, flagsReg ccr) %{
  match(Set dst (AddL src1 src2));
  effect(KILL ccr);
  size(8);
  format %{ "ADDS    $dst.lo,$src1.lo,$src2.lo\t! long\n\t"
            "ADC     $dst.hi,$src1.hi,$src2.hi" %}
  ins_encode %{
    __ adds($dst$$Register, $src1$$Register, $src2$$Register);
    // successor() is the adjacent register holding the high 32 bits.
    __ adc($dst$$Register->successor(), $src1$$Register->successor(), $src2$$Register->successor());
  %}
  ins_pipe(ialu_reg_reg);
%}
7170 #endif
7171 
7172 #ifdef AARCH64
7173 // Immediate Addition
// Long addition with an arithmetic-encodable immediate (AArch64).
instruct addL_reg_aimm(iRegL dst, iRegL src1, aimmL src2) %{
  match(Set dst (AddL src1 src2));

  size(4);
  format %{ "ADD    $dst,$src1,$src2\t! long" %}
  ins_encode %{
    __ add($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(ialu_reg_imm);
%}
7184 
// Long subtraction of a constant whose negation is arithmetic-encodable:
// SubL(src1, src2) is emitted as ADD of -src2.
instruct addL_reg_immLneg(iRegL dst, iRegL src1, aimmLneg src2) %{
  match(Set dst (SubL src1 src2));

  size(4);
  format %{ "ADD    $dst,$src1,-($src2)\t! long" %}
  ins_encode %{
    __ add($dst$$Register, $src1$$Register, -$src2$$constant);
  %}
  ins_pipe(ialu_reg_imm);
%}
7195 #else
7196 // TODO
7197 #endif
7198 
7199 #ifndef AARCH64
// TODO: try immLRot2 instead, so that (0, $con$$constant) becomes
// (hi($con$$constant), lo($con$$constant))
// Long addition of a rotate-encodable constant (32-bit ARM): ADDS on the low
// word sets the carry for the ADC of 0 into the high word; clobbers flags.
instruct addL_reg_immRot(iRegL dst, iRegL src1, immLlowRot con, flagsReg ccr) %{
  match(Set dst (AddL src1 con));
  effect(KILL ccr);
  size(8);
  format %{ "ADDS    $dst.lo,$src1.lo,$con\t! long\n\t"
            "ADC     $dst.hi,$src1.hi,0" %}
  ins_encode %{
    __ adds($dst$$Register, $src1$$Register, $con$$constant);
    // High half only receives the carry: the immLlowRot constant's high half is 0.
    __ adc($dst$$Register->successor(), $src1$$Register->successor(), 0);
  %}
  ins_pipe(ialu_reg_imm);
%}
7214 #endif
7215 
7216 //----------Conditional_store--------------------------------------------------
7217 // Conditional-store of the updated heap-top.
7218 // Used during allocation of the shared heap.
7219 // Sets flags (EQ) on success.
7220 
7221 // TODO: optimize out barriers with AArch64 load-acquire/store-release
7222 // LoadP-locked.
// Load-exclusive of a pointer (LoadPLocked); pairs with storePConditional
// below to implement the locked heap-top update.
instruct loadPLocked(iRegP dst, memoryex mem) %{
  match(Set dst (LoadPLocked mem));
  size(4);
  format %{ "LDREX  $dst,$mem" %}
  ins_encode %{
#ifdef AARCH64
    Register base = reg_to_register_object($mem$$base);
    __ ldxr($dst$$Register, base);
#else
    __ ldrex($dst$$Register,$mem$$Address);
#endif
  %}
  ins_pipe(iload_mem);
%}
7237 
// Store-exclusive of the updated heap top; only valid directly after a
// LoadPLocked (enforced by the predicate). Sets EQ in pcc when the exclusive
// store succeeded ($tmp == 0).
instruct storePConditional( memoryex heap_top_ptr, iRegP oldval, iRegP newval, iRegI tmp, flagsRegP pcc ) %{
  predicate(_kids[1]->_kids[0]->_leaf->Opcode() == Op_LoadPLocked); // only works in conjunction with a LoadPLocked node
  match(Set pcc (StorePConditional heap_top_ptr (Binary oldval newval)));
  effect( TEMP tmp );
  size(8);
  format %{ "STREX  $tmp,$newval,$heap_top_ptr\n\t"
            "CMP    $tmp, 0" %}
  ins_encode %{
#ifdef AARCH64
    Register base = reg_to_register_object($heap_top_ptr$$base);
    __ stxr($tmp$$Register, $newval$$Register, base);
#else
    __ strex($tmp$$Register, $newval$$Register, $heap_top_ptr$$Address);
#endif
    // STREX/STXR write 0 on success; CMP against 0 translates that into EQ.
    __ cmp($tmp$$Register, 0);
  %}
  ins_pipe( long_memory_op );
%}
7256 
7257 // Conditional-store of an intx value.
// Conditional store of an intx value: store $newval into [$mem] only if
// [$mem] still equals $oldval, using a load-/store-exclusive retry loop.
// Leaves EQ set in icc on success. $oldval/$newval are NOT updated.
// Fix: the ARM32 format text said "XORS", which is not an ARM mnemonic and
// did not match the emitted `eors`; normalized to "EORS" (as the sibling
// compareAndSwapP_bool format already does).
instruct storeXConditional( memoryex mem, iRegX oldval, iRegX newval, iRegX tmp, flagsReg icc ) %{
#ifdef AARCH64
  match(Set icc (StoreLConditional mem (Binary oldval newval)));
  effect( TEMP tmp );
  size(28);
  format %{ "loop:\n\t"
            "LDXR     $tmp, $mem\t! If $oldval==[$mem] Then store $newval into [$mem], DOESN'T set $newval=[$mem] in any case\n\t"
            "SUBS     $tmp, $tmp, $oldval\n\t"
            "B.ne     done\n\t"
            "STXR     $tmp, $newval, $mem\n\t"
            "CBNZ_w   $tmp, loop\n\t"
            "CMP      $tmp, 0\n\t"
            "done:\n\t"
            "membar   LoadStore|LoadLoad" %}
#else
  match(Set icc (StoreIConditional mem (Binary oldval newval)));
  effect( TEMP tmp );
  size(28);
  format %{ "loop: \n\t"
            "LDREX    $tmp, $mem\t! If $oldval==[$mem] Then store $newval into [$mem], DOESN'T set $newval=[$mem] in any case\n\t"
            "EORS     $tmp,$tmp, $oldval\n\t"
            "STREX.eq $tmp, $newval, $mem\n\t"
            "CMP.eq   $tmp, 1 \n\t"
            "B.eq     loop \n\t"
            "TEQ      $tmp, 0\n\t"
            "membar   LoadStore|LoadLoad" %}
#endif
  ins_encode %{
    Label loop;
    __ bind(loop);
#ifdef AARCH64
// FIXME: use load-acquire/store-release, remove membar?
    Label done;
    Register base = reg_to_register_object($mem$$base);
    __ ldxr($tmp$$Register, base);
    // SUBS sets NE (and skips the store) when the current value != oldval.
    __ subs($tmp$$Register, $tmp$$Register, $oldval$$Register);
    __ b(done, ne);
    __ stxr($tmp$$Register, $newval$$Register, base);
    // Non-zero status means the exclusive store was interrupted: retry.
    __ cbnz_w($tmp$$Register, loop);
    // tmp == 0 here, so CMP sets EQ to signal success to the matched flags.
    __ cmp($tmp$$Register, 0);
    __ bind(done);
#else
    __ ldrex($tmp$$Register, $mem$$Address);
    // EORS yields 0 (EQ) iff the current value equals oldval.
    __ eors($tmp$$Register, $tmp$$Register, $oldval$$Register);
    __ strex($tmp$$Register, $newval$$Register, $mem$$Address, eq);
    // STREX status 1 means the exclusive store failed: loop and retry.
    __ cmp($tmp$$Register, 1, eq);
    __ b(loop, eq);
    // TEQ against 0 produces the final EQ/NE success flag.
    __ teq($tmp$$Register, 0);
#endif
    // used by biased locking only. Requires a membar.
    __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadStore | MacroAssembler::LoadLoad), noreg);
  %}
  ins_pipe( long_memory_op );
%}
7312 
7313 // No flag versions for CompareAndSwap{P,I,L} because matcher can't match them
7314 
7315 #ifdef AARCH64
7316 // TODO: if combined with membar, elide membar and use
7317 // load-acquire/store-release if appropriate
// 64-bit CAS returning a boolean (AArch64): LDXR/STXR retry loop;
// res = 1 if the swap happened, 0 otherwise (via CSET on EQ).
instruct compareAndSwapL_bool(memoryex mem, iRegL oldval, iRegL newval, iRegI res, iRegI tmp, flagsReg ccr) %{
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  effect( KILL ccr, TEMP tmp);
  size(24);
  format %{ "loop:\n\t"
            "LDXR     $tmp, $mem\t! If $oldval==[$mem] Then store $newval into [$mem]\n\t"
            "CMP      $tmp, $oldval\n\t"
            "B.ne     done\n\t"
            "STXR     $tmp, $newval, $mem\n\t"
            "CBNZ_w   $tmp, loop\n\t"
            "done:\n\t"
            "CSET_w   $res, eq" %}
  ins_encode %{
    Register base = reg_to_register_object($mem$$base);
    Label loop, done;
    __ bind(loop);
    __ ldxr($tmp$$Register, base);
    __ cmp($tmp$$Register, $oldval$$Register);
    // Mismatch: leave with NE set, so CSET produces 0.
    __ b(done, ne);
    __ stxr($tmp$$Register, $newval$$Register, base);
    // Non-zero status: exclusive store was interrupted, retry.
    __ cbnz_w($tmp$$Register, loop);
    __ bind(done);
    __ cset_w($res$$Register, eq);
  %}
  ins_pipe( long_memory_op );
%}
7344 
// 32-bit CAS returning a boolean (AArch64): LDXR_w/STXR_w retry loop;
// res = 1 if the swap happened, 0 otherwise (via CSET on EQ).
instruct compareAndSwapI_bool(memoryex mem, iRegI oldval, iRegI newval, iRegI res, iRegI tmp, flagsReg ccr) %{
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  effect( KILL ccr, TEMP tmp);
  size(24);
  format %{ "loop:\n\t"
            "LDXR_w   $tmp, $mem\t! If $oldval==[$mem] Then store $newval into [$mem]\n\t"
            "CMP_w    $tmp, $oldval\n\t"
            "B.ne     done\n\t"
            "STXR_w   $tmp, $newval, $mem\n\t"
            "CBNZ_w   $tmp, loop\n\t"
            "done:\n\t"
            "CSET_w   $res, eq" %}
  ins_encode %{
    Register base = reg_to_register_object($mem$$base);
    Label loop, done;
    __ bind(loop);
    __ ldxr_w($tmp$$Register, base);
    __ cmp_w($tmp$$Register, $oldval$$Register);
    // Mismatch: leave with NE set, so CSET produces 0.
    __ b(done, ne);
    __ stxr_w($tmp$$Register, $newval$$Register,  base);
    // Non-zero status: exclusive store was interrupted, retry.
    __ cbnz_w($tmp$$Register, loop);
    __ bind(done);
    __ cset_w($res$$Register, eq);
  %}
  ins_pipe( long_memory_op );
%}
7371 
7372 // tmp must use iRegI instead of iRegN until 8051805 is fixed.
// Narrow-oop CAS returning a boolean (AArch64): same 32-bit LDXR_w/STXR_w
// loop as compareAndSwapI_bool, applied to compressed-pointer operands.
// tmp must use iRegI instead of iRegN until 8051805 is fixed.
instruct compareAndSwapN_bool(memoryex mem, iRegN oldval, iRegN newval, iRegI res, iRegI tmp, flagsReg ccr) %{
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  effect( KILL ccr, TEMP tmp);
  size(24);
  format %{ "loop:\n\t"
            "LDXR_w   $tmp, $mem\t! If $oldval==[$mem] Then store $newval into [$mem]\n\t"
            "CMP_w    $tmp, $oldval\n\t"
            "B.ne     done\n\t"
            "STXR_w   $tmp, $newval, $mem\n\t"
            "CBNZ_w   $tmp, loop\n\t"
            "done:\n\t"
            "CSET_w   $res, eq" %}
  ins_encode %{
    Register base = reg_to_register_object($mem$$base);
    Label loop, done;
    __ bind(loop);
    __ ldxr_w($tmp$$Register, base);
    __ cmp_w($tmp$$Register, $oldval$$Register);
    // Mismatch: leave with NE set, so CSET produces 0.
    __ b(done, ne);
    __ stxr_w($tmp$$Register, $newval$$Register,  base);
    // Non-zero status: exclusive store was interrupted, retry.
    __ cbnz_w($tmp$$Register, loop);
    __ bind(done);
    __ cset_w($res$$Register, eq);
  %}
  ins_pipe( long_memory_op );
%}
7399 
// Pointer CAS returning a boolean (AArch64): 64-bit LDXR/STXR retry loop;
// res = 1 if the swap happened, 0 otherwise (via CSET on EQ).
instruct compareAndSwapP_bool(memoryex mem, iRegP oldval, iRegP newval, iRegI res, iRegI tmp, flagsReg ccr) %{
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  effect( KILL ccr, TEMP tmp);
  size(24);
  format %{ "loop:\n\t"
            "LDXR     $tmp, $mem\t! If $oldval==[$mem] Then store $newval into [$mem]\n\t"
            "CMP      $tmp, $oldval\n\t"
            "B.ne     done\n\t"
            "STXR     $tmp, $newval, $mem\n\t"
            "CBNZ_w   $tmp, loop\n\t"
            "done:\n\t"
            "CSET_w   $res, eq" %}
  ins_encode %{
    Register base = reg_to_register_object($mem$$base);
    Label loop, done;
    __ bind(loop);
    __ ldxr($tmp$$Register, base);
    __ cmp($tmp$$Register, $oldval$$Register);
    // Mismatch: leave with NE set, so CSET produces 0.
    __ b(done, ne);
    __ stxr($tmp$$Register, $newval$$Register,  base);
    // Non-zero status: exclusive store was interrupted, retry.
    __ cbnz_w($tmp$$Register, loop);
    __ bind(done);
    __ cset_w($res$$Register, eq);
  %}
  ins_pipe( long_memory_op );
%}
7426 #else // !AARCH64
// 64-bit CAS returning a boolean (32-bit ARM): LDREXD/STREXD retry loop over
// a doubleword register pair; res = 1 if the swap happened, 0 otherwise.
// On success STREXD leaves status 0 in tmp, EORS with 1 makes it 1 (and sets
// NE to exit the loop); on value mismatch tmp is forced to 0.
// Fix: format text said "XORS.eq" — not an ARM mnemonic and inconsistent with
// both the emitted `eors` and compareAndSwapP_bool's "EORS.eq"; normalized.
instruct compareAndSwapL_bool(memoryex mem, iRegL oldval, iRegLd newval, iRegI res, iRegLd tmp, flagsReg ccr ) %{
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  effect( KILL ccr, TEMP tmp);
  size(32);
  format %{ "loop: \n\t"
            "LDREXD   $tmp, $mem\t! If $oldval==[$mem] Then store $newval into [$mem]\n\t"
            "CMP      $tmp.lo, $oldval.lo\n\t"
            "CMP.eq   $tmp.hi, $oldval.hi\n\t"
            "STREXD.eq $tmp, $newval, $mem\n\t"
            "MOV.ne   $tmp, 0 \n\t"
            "EORS.eq  $tmp,$tmp, 1 \n\t"
            "B.eq     loop \n\t"
            "MOV      $res, $tmp" %}
  ins_encode %{
    Label loop;
    __ bind(loop);
    __ ldrexd($tmp$$Register, $mem$$Address);
    // Compare both halves; EQ survives only if low AND high words match.
    __ cmp($tmp$$Register, $oldval$$Register);
    __ cmp($tmp$$Register->successor(), $oldval$$Register->successor(), eq);
    __ strexd($tmp$$Register, $newval$$Register, $mem$$Address, eq);
    // Value mismatch: result is 0 (failure).
    __ mov($tmp$$Register, 0, ne);
    // Invert STREXD status (0 -> 1 success, 1 -> 0 + EQ -> retry).
    __ eors($tmp$$Register, $tmp$$Register, 1, eq);
    __ b(loop, eq);
    __ mov($res$$Register, $tmp$$Register);
  %}
  ins_pipe( long_memory_op );
%}
7454 
7455 
// 32-bit CAS returning a boolean (32-bit ARM): LDREX/STREX retry loop;
// res = 1 if the swap happened, 0 otherwise. On success STREX leaves status 0
// in tmp, EORS with 1 makes it 1 (and sets NE to exit the loop); on value
// mismatch tmp is forced to 0.
// Fix: format text said "XORS.eq" — not an ARM mnemonic and inconsistent with
// both the emitted `eors` and compareAndSwapP_bool's "EORS.eq"; normalized.
instruct compareAndSwapI_bool(memoryex mem, iRegI oldval, iRegI newval, iRegI res, iRegI tmp, flagsReg ccr ) %{
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  effect( KILL ccr, TEMP tmp);
  size(28);
  format %{ "loop: \n\t"
            "LDREX    $tmp, $mem\t! If $oldval==[$mem] Then store $newval into [$mem]\n\t"
            "CMP      $tmp, $oldval\n\t"
            "STREX.eq $tmp, $newval, $mem\n\t"
            "MOV.ne   $tmp, 0 \n\t"
            "EORS.eq  $tmp,$tmp, 1 \n\t"
            "B.eq     loop \n\t"
            "MOV      $res, $tmp" %}

  ins_encode %{
    Label loop;
    __ bind(loop);
    __ ldrex($tmp$$Register,$mem$$Address);
    __ cmp($tmp$$Register, $oldval$$Register);
    __ strex($tmp$$Register, $newval$$Register, $mem$$Address, eq);
    // Value mismatch: result is 0 (failure).
    __ mov($tmp$$Register, 0, ne);
    // Invert STREX status (0 -> 1 success, 1 -> 0 + EQ -> retry).
    __ eors($tmp$$Register, $tmp$$Register, 1, eq);
    __ b(loop, eq);
    __ mov($res$$Register, $tmp$$Register);
  %}
  ins_pipe( long_memory_op );
%}
7482 
// Pointer CAS returning a boolean (32-bit ARM): LDREX/STREX retry loop;
// res = 1 if the swap happened, 0 otherwise.
instruct compareAndSwapP_bool(memoryex mem, iRegP oldval, iRegP newval, iRegI res, iRegI tmp, flagsReg ccr ) %{
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  effect( KILL ccr, TEMP tmp);
  size(28);
  format %{ "loop: \n\t"
            "LDREX    $tmp, $mem\t! If $oldval==[$mem] Then store $newval into [$mem]\n\t"
            "CMP      $tmp, $oldval\n\t"
            "STREX.eq $tmp, $newval, $mem\n\t"
            "MOV.ne   $tmp, 0 \n\t"
            "EORS.eq  $tmp,$tmp, 1 \n\t"
            "B.eq     loop \n\t"
            "MOV      $res, $tmp" %}

  ins_encode %{
    Label loop;
    __ bind(loop);
    __ ldrex($tmp$$Register,$mem$$Address);
    __ cmp($tmp$$Register, $oldval$$Register);
    __ strex($tmp$$Register, $newval$$Register, $mem$$Address, eq);
    // Value mismatch: result is 0 (failure).
    __ mov($tmp$$Register, 0, ne);
    // Invert STREX status (0 -> 1 success, 1 -> 0 + EQ -> retry).
    __ eors($tmp$$Register, $tmp$$Register, 1, eq);
    __ b(loop, eq);
    __ mov($res$$Register, $tmp$$Register);
  %}
  ins_pipe( long_memory_op );
%}
7509 #endif // !AARCH64
7510 
7511 #ifdef AARCH64
// Atomic add of an immediate to [$mem], result discarded (AArch64).
// Matched only when the GetAndAddI result is unused (see predicate).
instruct xaddI_aimmI_no_res(memoryex mem, aimmI add, Universe dummy, iRegI tmp1, iRegI tmp2) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem add));
  effect(TEMP tmp1, TEMP tmp2);
  size(16);
  format %{ "loop:\n\t"
            "LDXR_w   $tmp1, $mem\n\t"
            "ADD_w    $tmp1, $tmp1, $add\n\t"
            "STXR_w   $tmp2, $tmp1, $mem\n\t"
            "CBNZ_w   $tmp2, loop" %}

  ins_encode %{
    Label loop;
    Register base = reg_to_register_object($mem$$base);
    __ bind(loop);
    __ ldxr_w($tmp1$$Register, base);
    __ add_w($tmp1$$Register, $tmp1$$Register, $add$$constant);
    __ stxr_w($tmp2$$Register, $tmp1$$Register, base);
    // Non-zero status: exclusive store was interrupted, retry.
    __ cbnz_w($tmp2$$Register, loop);
  %}
  ins_pipe( long_memory_op );
%}
7534 #else
// Atomic add of an immediate to [$mem], result discarded (32-bit ARM).
// LDREX/STREX retry loop; clobbers flags via the CMP on the store status.
instruct xaddI_aimmI_no_res(memoryex mem, aimmI add, Universe dummy, iRegI tmp1, iRegI tmp2, flagsReg ccr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem add));
  effect(KILL ccr, TEMP tmp1, TEMP tmp2);
  size(20);
  format %{ "loop: \n\t"
            "LDREX    $tmp1, $mem\n\t"
            "ADD      $tmp1, $tmp1, $add\n\t"
            "STREX    $tmp2, $tmp1, $mem\n\t"
            "CMP      $tmp2, 0 \n\t"
            "B.ne     loop \n\t" %}

  ins_encode %{
    Label loop;
    __ bind(loop);
    __ ldrex($tmp1$$Register,$mem$$Address);
    __ add($tmp1$$Register, $tmp1$$Register, $add$$constant);
    __ strex($tmp2$$Register, $tmp1$$Register, $mem$$Address);
    // Non-zero STREX status: exclusive store was interrupted, retry.
    __ cmp($tmp2$$Register, 0);
    __ b(loop, ne);
  %}
  ins_pipe( long_memory_op );
%}
7558 #endif
7559 
7560 #ifdef AARCH64
// Atomic add of a register value to [$mem], result discarded (AArch64).
instruct xaddI_reg_no_res(memoryex mem, iRegI add, Universe dummy, iRegI tmp1, iRegI tmp2) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem add));
  effect(TEMP tmp1, TEMP tmp2);
  size(16);
  format %{ "loop:\n\t"
            "LDXR_w   $tmp1, $mem\n\t"
            "ADD_w    $tmp1, $tmp1, $add\n\t"
            "STXR_w   $tmp2, $tmp1, $mem\n\t"
            "CBNZ_w   $tmp2, loop" %}

  ins_encode %{
    Label loop;
    Register base = reg_to_register_object($mem$$base);
    __ bind(loop);
    __ ldxr_w($tmp1$$Register, base);
    __ add_w($tmp1$$Register, $tmp1$$Register, $add$$Register);
    __ stxr_w($tmp2$$Register, $tmp1$$Register, base);
    // Non-zero status: exclusive store was interrupted, retry.
    __ cbnz_w($tmp2$$Register, loop);
  %}
  ins_pipe( long_memory_op );
%}
7583 #else
// Atomic add of a register value to [$mem], result discarded (32-bit ARM).
instruct xaddI_reg_no_res(memoryex mem, iRegI add, Universe dummy, iRegI tmp1, iRegI tmp2, flagsReg ccr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem add));
  effect(KILL ccr, TEMP tmp1, TEMP tmp2);
  size(20);
  format %{ "loop: \n\t"
            "LDREX    $tmp1, $mem\n\t"
            "ADD      $tmp1, $tmp1, $add\n\t"
            "STREX    $tmp2, $tmp1, $mem\n\t"
            "CMP      $tmp2, 0 \n\t"
            "B.ne     loop \n\t" %}

  ins_encode %{
    Label loop;
    __ bind(loop);
    __ ldrex($tmp1$$Register,$mem$$Address);
    __ add($tmp1$$Register, $tmp1$$Register, $add$$Register);
    __ strex($tmp2$$Register, $tmp1$$Register, $mem$$Address);
    // Non-zero STREX status: exclusive store was interrupted, retry.
    __ cmp($tmp2$$Register, 0);
    __ b(loop, ne);
  %}
  ins_pipe( long_memory_op );
%}
7607 #endif
7608 
7609 #ifdef AARCH64
// Atomic fetch-and-add of an immediate (AArch64): res receives the value
// that was in [$mem] before the add.
instruct xaddI_aimmI(memoryex mem, aimmI add, iRegI res, iRegI tmp1, iRegI tmp2) %{
  match(Set res (GetAndAddI mem add));
  effect(TEMP tmp1, TEMP tmp2, TEMP res);
  size(16);
  format %{ "loop:\n\t"
            "LDXR_w   $res, $mem\n\t"
            "ADD_w    $tmp1, $res, $add\n\t"
            "STXR_w   $tmp2, $tmp1, $mem\n\t"
            "CBNZ_w   $tmp2, loop" %}

  ins_encode %{
    Label loop;
    Register base = reg_to_register_object($mem$$base);
    __ bind(loop);
    __ ldxr_w($res$$Register, base);
    __ add_w($tmp1$$Register, $res$$Register, $add$$constant);
    __ stxr_w($tmp2$$Register, $tmp1$$Register, base);
    // Non-zero status: exclusive store was interrupted, retry.
    __ cbnz_w($tmp2$$Register, loop);
  %}
  ins_pipe( long_memory_op );
%}
7631 #else
// Atomic fetch-and-add of an immediate (32-bit ARM): res receives the value
// that was in [$mem] before the add.
instruct xaddI_aimmI(memoryex mem, aimmI add, iRegI res, iRegI tmp1, iRegI tmp2, flagsReg ccr) %{
  match(Set res (GetAndAddI mem add));
  effect(KILL ccr, TEMP tmp1, TEMP tmp2, TEMP res);
  size(20);
  format %{ "loop: \n\t"
            "LDREX    $res, $mem\n\t"
            "ADD      $tmp1, $res, $add\n\t"
            "STREX    $tmp2, $tmp1, $mem\n\t"
            "CMP      $tmp2, 0 \n\t"
            "B.ne     loop \n\t" %}

  ins_encode %{
    Label loop;
    __ bind(loop);
    __ ldrex($res$$Register,$mem$$Address);
    __ add($tmp1$$Register, $res$$Register, $add$$constant);
    __ strex($tmp2$$Register, $tmp1$$Register, $mem$$Address);
    // Non-zero STREX status: exclusive store was interrupted, retry.
    __ cmp($tmp2$$Register, 0);
    __ b(loop, ne);
  %}
  ins_pipe( long_memory_op );
%}
7654 #endif
7655 
7656 #ifdef AARCH64
// Atomic fetch-and-add of a register value (AArch64): res receives the value
// that was in [$mem] before the add.
instruct xaddI_reg(memoryex mem, iRegI add, iRegI res, iRegI tmp1, iRegI tmp2) %{
  match(Set res (GetAndAddI mem add));
  effect(TEMP tmp1, TEMP tmp2, TEMP res);
  size(16);
  format %{ "loop:\n\t"
            "LDXR_w   $res, $mem\n\t"
            "ADD_w    $tmp1, $res, $add\n\t"
            "STXR_w   $tmp2, $tmp1, $mem\n\t"
            "CBNZ_w   $tmp2, loop" %}

  ins_encode %{
    Label loop;
    Register base = reg_to_register_object($mem$$base);
    __ bind(loop);
    __ ldxr_w($res$$Register, base);
    __ add_w($tmp1$$Register, $res$$Register, $add$$Register);
    __ stxr_w($tmp2$$Register, $tmp1$$Register, base);
    // Non-zero status: exclusive store was interrupted, retry.
    __ cbnz_w($tmp2$$Register, loop);
  %}
  ins_pipe( long_memory_op );
%}
7678 #else
7679 instruct xaddI_reg(memoryex mem, iRegI add, iRegI res, iRegI tmp1, iRegI tmp2, flagsReg ccr) %{
7680   match(Set res (GetAndAddI mem add));
7681   effect(KILL ccr, TEMP tmp1, TEMP tmp2, TEMP res);
7682   size(20);
7683   format %{ "loop: \n\t"
7684             "LDREX    $res, $mem\n\t"
7685             "ADD      $tmp1, $res, $add\n\t"
7686             "STREX    $tmp2, $tmp1, $mem\n\t"
7687             "CMP      $tmp2, 0 \n\t"
7688             "B.ne     loop \n\t" %}
7689 
7690   ins_encode %{
7691     Label loop;
7692     __ bind(loop);
7693     __ ldrex($res$$Register,$mem$$Address);
7694     __ add($tmp1$$Register, $res$$Register, $add$$Register);
7695     __ strex($tmp2$$Register, $tmp1$$Register, $mem$$Address);
7696     __ cmp($tmp2$$Register, 0);
7697     __ b(loop, ne);
7698   %}
7699   ins_pipe( long_memory_op );
7700 %}
7701 #endif
7702 
#ifdef AARCH64
// GetAndAddL whose fetched value is unused (result_not_used predicate): the
// old value is kept only in the TEMP $tmp1, no result register is defined.
instruct xaddL_reg_no_res(memoryex mem, iRegL add, Universe dummy, iRegL tmp1, iRegI tmp2) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem add));
  effect(TEMP tmp1, TEMP tmp2);
  size(16);
  format %{ "loop:\n\t"
            "LDXR     $tmp1, $mem\n\t"
            "ADD      $tmp1, $tmp1, $add\n\t"
            "STXR     $tmp2, $tmp1, $mem\n\t"
            "CBNZ_w   $tmp2, loop" %}

  ins_encode %{
    Label loop;
    Register base = reg_to_register_object($mem$$base);
    __ bind(loop);
    __ ldxr($tmp1$$Register, base);
    __ add($tmp1$$Register, $tmp1$$Register, $add$$Register);
    __ stxr($tmp2$$Register, $tmp1$$Register, base);
    __ cbnz_w($tmp2$$Register, loop);
  %}
  ins_pipe( long_memory_op );
%}
#else
// ARM32: 64-bit add is done as ADDS on the low words plus ADC on the high
// words ($tmp1->successor() is the high half of the iRegLd pair), wrapped in
// an LDREXD/STREXD doubleword-exclusive retry loop.
instruct xaddL_reg_no_res(memoryex mem, iRegL add, Universe dummy, iRegLd tmp1, iRegI tmp2, flagsReg ccr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem add));
  effect( KILL ccr, TEMP tmp1, TEMP tmp2);
  size(24);
  format %{ "loop: \n\t"
            "LDREXD   $tmp1, $mem\n\t"
            "ADDS     $tmp1.lo, $tmp1.lo, $add.lo\n\t"
            "ADC      $tmp1.hi, $tmp1.hi, $add.hi\n\t"
            "STREXD   $tmp2, $tmp1, $mem\n\t"
            "CMP      $tmp2, 0 \n\t"
            "B.ne     loop \n\t" %}

  ins_encode %{
    Label loop;
    __ bind(loop);
    __ ldrexd($tmp1$$Register, $mem$$Address);
    __ adds($tmp1$$Register, $tmp1$$Register, $add$$Register);
    __ adc($tmp1$$Register->successor(), $tmp1$$Register->successor(), $add$$Register->successor());
    __ strexd($tmp2$$Register, $tmp1$$Register, $mem$$Address);
    __ cmp($tmp2$$Register, 0);
    __ b(loop, ne);
  %}
  ins_pipe( long_memory_op );
%}
#endif
7753 
#ifdef AARCH64
// GetAndAddL with an arithmetic-immediate addend and an unused result.
instruct xaddL_imm_no_res(memoryex mem, aimmL add, Universe dummy, iRegL tmp1, iRegI tmp2) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem add));
  effect(TEMP tmp1, TEMP tmp2);
  size(16);
  format %{ "loop:\n\t"
            "LDXR     $tmp1, $mem\n\t"
            "ADD      $tmp1, $tmp1, $add\n\t"
            "STXR     $tmp2, $tmp1, $mem\n\t"
            "CBNZ_w   $tmp2, loop" %}

  ins_encode %{
    Label loop;
    Register base = reg_to_register_object($mem$$base);
    __ bind(loop);
    __ ldxr($tmp1$$Register, base);
    __ add($tmp1$$Register, $tmp1$$Register, $add$$constant);
    __ stxr($tmp2$$Register, $tmp1$$Register, base);
    __ cbnz_w($tmp2$$Register, loop);
  %}
  ins_pipe( long_memory_op );
%}
#else
// TODO: try immLRot2 instead, (0, $con$$constant) becomes
// (hi($con$$constant), lo($con$$constant)) becomes
// ARM32: immLlowRot restricts the constant to the low word (high word 0), so
// the carry into the high half is just ADC ..., 0.
instruct xaddL_immRot_no_res(memoryex mem, immLlowRot add, Universe dummy, iRegLd tmp1, iRegI tmp2, flagsReg ccr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem add));
  effect( KILL ccr, TEMP tmp1, TEMP tmp2);
  size(24);
  format %{ "loop: \n\t"
            "LDREXD   $tmp1, $mem\n\t"
            "ADDS     $tmp1.lo, $tmp1.lo, $add\n\t"
            "ADC      $tmp1.hi, $tmp1.hi, 0\n\t"
            "STREXD   $tmp2, $tmp1, $mem\n\t"
            "CMP      $tmp2, 0 \n\t"
            "B.ne     loop \n\t" %}

  ins_encode %{
    Label loop;
    __ bind(loop);
    __ ldrexd($tmp1$$Register, $mem$$Address);
    __ adds($tmp1$$Register, $tmp1$$Register, $add$$constant);
    __ adc($tmp1$$Register->successor(), $tmp1$$Register->successor(), 0);
    __ strexd($tmp2$$Register, $tmp1$$Register, $mem$$Address);
    __ cmp($tmp2$$Register, 0);
    __ b(loop, ne);
  %}
  ins_pipe( long_memory_op );
%}
#endif
7806 
#ifdef AARCH64
// GetAndAddL, register addend, result used: $res receives the old value,
// $tmp1 holds the sum that is stored back.
instruct xaddL_reg(memoryex mem, iRegL add, iRegL res, iRegL tmp1, iRegI tmp2) %{
  match(Set res (GetAndAddL mem add));
  effect(TEMP tmp1, TEMP tmp2, TEMP res);
  size(16);
  format %{ "loop:\n\t"
            "LDXR     $res, $mem\n\t"
            "ADD      $tmp1, $res, $add\n\t"
            "STXR     $tmp2, $tmp1, $mem\n\t"
            "CBNZ_w   $tmp2, loop" %}

  ins_encode %{
    Label loop;
    Register base = reg_to_register_object($mem$$base);
    __ bind(loop);
    __ ldxr($res$$Register, base);
    __ add($tmp1$$Register, $res$$Register, $add$$Register);
    __ stxr($tmp2$$Register, $tmp1$$Register, base);
    __ cbnz_w($tmp2$$Register, loop);
  %}
  ins_pipe( long_memory_op );
%}
#else
// ARM32: doubleword LDREXD/STREXD loop; ADDS/ADC propagate the carry from
// the low-word add into the high word of the register pair.
instruct xaddL_reg(memoryex mem, iRegL add, iRegLd res, iRegLd tmp1, iRegI tmp2, flagsReg ccr) %{
  match(Set res (GetAndAddL mem add));
  effect( KILL ccr, TEMP tmp1, TEMP tmp2, TEMP res);
  size(24);
  format %{ "loop: \n\t"
            "LDREXD   $res, $mem\n\t"
            "ADDS     $tmp1.lo, $res.lo, $add.lo\n\t"
            "ADC      $tmp1.hi, $res.hi, $add.hi\n\t"
            "STREXD   $tmp2, $tmp1, $mem\n\t"
            "CMP      $tmp2, 0 \n\t"
            "B.ne     loop \n\t" %}

  ins_encode %{
    Label loop;
    __ bind(loop);
    __ ldrexd($res$$Register, $mem$$Address);
    __ adds($tmp1$$Register, $res$$Register, $add$$Register);
    __ adc($tmp1$$Register->successor(), $res$$Register->successor(), $add$$Register->successor());
    __ strexd($tmp2$$Register, $tmp1$$Register, $mem$$Address);
    __ cmp($tmp2$$Register, 0);
    __ b(loop, ne);
  %}
  ins_pipe( long_memory_op );
%}
#endif
7855 
#ifdef AARCH64
// GetAndAddL, arithmetic-immediate addend, result used.
instruct xaddL_imm(memoryex mem, aimmL add, iRegL res, iRegL tmp1, iRegI tmp2) %{
  match(Set res (GetAndAddL mem add));
  effect(TEMP tmp1, TEMP tmp2, TEMP res);
  size(16);
  format %{ "loop:\n\t"
            "LDXR     $res, $mem\n\t"
            "ADD      $tmp1, $res, $add\n\t"
            "STXR     $tmp2, $tmp1, $mem\n\t"
            "CBNZ_w   $tmp2, loop" %}

  ins_encode %{
    Label loop;
    Register base = reg_to_register_object($mem$$base);
    __ bind(loop);
    __ ldxr($res$$Register, base);
    __ add($tmp1$$Register, $res$$Register, $add$$constant);
    __ stxr($tmp2$$Register, $tmp1$$Register, base);
    __ cbnz_w($tmp2$$Register, loop);
  %}
  ins_pipe( long_memory_op );
%}
#else
// TODO: try immLRot2 instead, (0, $con$$constant) becomes
// (hi($con$$constant), lo($con$$constant)) becomes
// ARM32: low-word-only rotated immediate, so the high half just absorbs the
// carry via ADC ..., 0. Old value is returned in the $res pair.
instruct xaddL_immRot(memoryex mem, immLlowRot add, iRegLd res, iRegLd tmp1, iRegI tmp2, flagsReg ccr) %{
  match(Set res (GetAndAddL mem add));
  effect( KILL ccr, TEMP tmp1, TEMP tmp2, TEMP res);
  size(24);
  format %{ "loop: \n\t"
            "LDREXD   $res, $mem\n\t"
            "ADDS     $tmp1.lo, $res.lo, $add\n\t"
            "ADC      $tmp1.hi, $res.hi, 0\n\t"
            "STREXD   $tmp2, $tmp1, $mem\n\t"
            "CMP      $tmp2, 0 \n\t"
            "B.ne     loop \n\t" %}

  ins_encode %{
    Label loop;
    __ bind(loop);
    __ ldrexd($res$$Register, $mem$$Address);
    __ adds($tmp1$$Register, $res$$Register, $add$$constant);
    __ adc($tmp1$$Register->successor(), $res$$Register->successor(), 0);
    __ strexd($tmp2$$Register, $tmp1$$Register, $mem$$Address);
    __ cmp($tmp2$$Register, 0);
    __ b(loop, ne);
  %}
  ins_pipe( long_memory_op );
%}
#endif
7906 
#ifdef AARCH64
// Atomic exchange of an int (GetAndSetI): load old value exclusively into
// $res, attempt to store $newval; retry on store-exclusive failure. Three
// instructions, no flags touched (CBNZ).
instruct xchgI(memoryex mem, iRegI newval, iRegI res, iRegI tmp) %{
  match(Set res (GetAndSetI mem newval));
  effect(TEMP tmp, TEMP res);
  size(12);
  format %{ "loop:\n\t"
            "LDXR_w   $res, $mem\n\t"
            "STXR_w   $tmp, $newval, $mem\n\t"
            "CBNZ_w   $tmp, loop" %}

  ins_encode %{
    Label loop;
    Register base = reg_to_register_object($mem$$base);
    __ bind(loop);
    __ ldxr_w($res$$Register, base);
    __ stxr_w($tmp$$Register, $newval$$Register, base);
    __ cbnz_w($tmp$$Register, loop);
  %}
  ins_pipe( long_memory_op );
%}

#ifdef XXX
// Disabled until 8051805 is fixed.
// Narrow-oop variant of the exchange; identical code shape to xchgI.
instruct xchgN(memoryex mem, iRegN newval, iRegN res, iRegN tmp) %{
  match(Set res (GetAndSetN mem newval));
  effect(TEMP tmp, TEMP res);
  size(12);
  format %{ "loop:\n\t"
            "LDXR_w   $res, $mem\n\t"
            "STXR_w   $tmp, $newval, $mem\n\t"
            "CBNZ_w   $tmp, loop" %}

  ins_encode %{
    Label loop;
    Register base = reg_to_register_object($mem$$base);
    __ bind(loop);
    __ ldxr_w($res$$Register, base);
    __ stxr_w($tmp$$Register, $newval$$Register, base);
    __ cbnz_w($tmp$$Register, loop);
  %}
  ins_pipe( long_memory_op );
%}
#endif
#else
// ARM32 exchange: LDREX/STREX retry loop; CMP on the status clobbers flags.
instruct xchgI(memoryex mem, iRegI newval, iRegI res, iRegI tmp, flagsReg ccr) %{
  match(Set res (GetAndSetI mem newval));
  effect(KILL ccr, TEMP tmp, TEMP res);
  size(16);
  format %{ "loop: \n\t"
            "LDREX    $res, $mem\n\t"
            "STREX    $tmp, $newval, $mem\n\t"
            "CMP      $tmp, 0 \n\t"
            "B.ne     loop \n\t" %}

  ins_encode %{
    Label loop;
    __ bind(loop);
    __ ldrex($res$$Register,$mem$$Address);
    __ strex($tmp$$Register, $newval$$Register, $mem$$Address);
    __ cmp($tmp$$Register, 0);
    __ b(loop, ne);
  %}
  ins_pipe( long_memory_op );
%}
#endif
7972 
#ifdef AARCH64
// Atomic exchange of a long (GetAndSetL): 64-bit LDXR/STXR retry loop.
instruct xchgL(memoryex mem, iRegL newval, iRegL res, iRegI tmp) %{
  match(Set res (GetAndSetL mem newval));
  effect(TEMP tmp, TEMP res);
  size(12);
  format %{ "loop:\n\t"
            "LDXR     $res, $mem\n\t"
            "STXR     $tmp, $newval, $mem\n\t"
            "CBNZ_w   $tmp, loop" %}

  ins_encode %{
    Label loop;
    Register base = reg_to_register_object($mem$$base);
    __ bind(loop);
    __ ldxr($res$$Register, base);
    __ stxr($tmp$$Register, $newval$$Register, base);
    __ cbnz_w($tmp$$Register, loop);
  %}
  ins_pipe( long_memory_op );
%}
#else
// ARM32: doubleword exchange via LDREXD/STREXD on aligned register pairs
// (iRegLd); flags clobbered by the status CMP.
instruct xchgL(memoryex mem, iRegLd newval, iRegLd res, iRegI tmp, flagsReg ccr) %{
  match(Set res (GetAndSetL mem newval));
  effect( KILL ccr, TEMP tmp, TEMP res);
  size(16);
  format %{ "loop: \n\t"
            "LDREXD   $res, $mem\n\t"
            "STREXD   $tmp, $newval, $mem\n\t"
            "CMP      $tmp, 0 \n\t"
            "B.ne     loop \n\t" %}

  ins_encode %{
    Label loop;
    __ bind(loop);
    __ ldrexd($res$$Register, $mem$$Address);
    __ strexd($tmp$$Register, $newval$$Register, $mem$$Address);
    __ cmp($tmp$$Register, 0);
    __ b(loop, ne);
  %}
  ins_pipe( long_memory_op );
%}
#endif // !AARCH64
8015 
#ifdef AARCH64
// Atomic exchange of a pointer (GetAndSetP), same retry-loop shape as xchgL.
// NOTE(review): this AArch64 path calls ldrex/strex where the sibling
// xchgI/xchgL patterns use ldxr/stxr — confirm these MacroAssembler names
// emit the intended exclusive ops at pointer width.
instruct xchgP(memoryex mem, iRegP newval, iRegP res, iRegI tmp) %{
  match(Set res (GetAndSetP mem newval));
  effect(TEMP tmp, TEMP res);
  size(12);
  format %{ "loop:\n\t"
            "LDREX    $res, $mem\n\t"
            "STREX    $tmp, $newval, $mem\n\t"
            "CBNZ_w   $tmp, loop" %}

  ins_encode %{
    Label loop;
    Register base = reg_to_register_object($mem$$base);
    __ bind(loop);
    __ ldrex($res$$Register, base);
    __ strex($tmp$$Register, $newval$$Register, base);
    __ cbnz_w($tmp$$Register, loop);
  %}
  ins_pipe( long_memory_op );
%}
#else
// ARM32 pointer exchange: word-sized LDREX/STREX loop, flags clobbered.
instruct xchgP(memoryex mem, iRegP newval, iRegP res, iRegI tmp, flagsReg ccr) %{
  match(Set res (GetAndSetP mem newval));
  effect(KILL ccr, TEMP tmp, TEMP res);
  size(16);
  format %{ "loop: \n\t"
            "LDREX    $res, $mem\n\t"
            "STREX    $tmp, $newval, $mem\n\t"
            "CMP      $tmp, 0 \n\t"
            "B.ne     loop \n\t" %}

  ins_encode %{
    Label loop;
    __ bind(loop);
    __ ldrex($res$$Register,$mem$$Address);
    __ strex($tmp$$Register, $newval$$Register, $mem$$Address);
    __ cmp($tmp$$Register, 0);
    __ b(loop, ne);
  %}
  ins_pipe( long_memory_op );
%}
#endif // !AARCH64
8058 
8059 //---------------------
8060 // Subtraction Instructions
8061 // Register Subtraction
// Integer subtraction, register - register (SubI).
instruct subI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (SubI src1 src2));

  size(4);
  format %{ "sub_32 $dst,$src1,$src2\t! int" %}
  ins_encode %{
    __ sub_32($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
8072 
#ifndef AARCH64
// SubI with a register-shifted operand, ARM32 only: folds the LShiftI into
// the SUB via the barrel shifter (one instruction instead of two).
instruct subshlI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  size(4);
  format %{ "SUB    $dst,$src1,$src2<<$src3" %}
  ins_encode %{
    __ sub($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsl, $src3$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}
#endif
8085 
// SubI with an immediate-shifted (LSL) second operand folded into the sub.
instruct subshlI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  size(4);
  format %{ "sub_32 $dst,$src1,$src2<<$src3\t! int" %}
  ins_encode %{
    __ sub_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsl, $src3$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}
8096 
#ifndef AARCH64
// SubI with a register arithmetic-right-shift (ASR) operand, ARM32 only.
instruct subsarI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  size(4);
  format %{ "SUB    $dst,$src1,$src2>>$src3" %}
  ins_encode %{
    __ sub($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, asr, $src3$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}
#endif
8109 
// SubI with an immediate arithmetic-right-shift (ASR) operand.
instruct subsarI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  size(4);
  format %{ "sub_32 $dst,$src1,$src2>>$src3\t! int" %}
  ins_encode %{
    __ sub_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, asr, $src3$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}
8120 
#ifndef AARCH64
// SubI with a register logical-right-shift (LSR) operand, ARM32 only.
instruct subshrI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  size(4);
  format %{ "SUB    $dst,$src1,$src2>>>$src3" %}
  ins_encode %{
    __ sub($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsr, $src3$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}
#endif
8133 
// SubI with an immediate logical-right-shift (LSR) operand.
instruct subshrI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  size(4);
  format %{ "sub_32 $dst,$src1,$src2>>>$src3\t! int" %}
  ins_encode %{
    __ sub_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsr, $src3$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}
8144 
#ifndef AARCH64
// Reverse-subtract patterns, ARM32 only: when the SHIFTED value is the
// minuend, RSB lets the shifted operand stay in the barrel-shifter slot
// (RSB dst, src3, shifted  computes  shifted - src3).

// (src1 << src2) - src3, register shift count.
instruct rsbshlI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (SubI (LShiftI src1 src2) src3));

  size(4);
  format %{ "RSB    $dst,$src3,$src1<<$src2" %}
  ins_encode %{
    __ rsb($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, lsl, $src2$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

// (src1 << src2) - src3, immediate shift count.
instruct rsbshlI_reg_imm_reg(iRegI dst, iRegI src1, immU5 src2, iRegI src3) %{
  match(Set dst (SubI (LShiftI src1 src2) src3));

  size(4);
  format %{ "RSB    $dst,$src3,$src1<<$src2" %}
  ins_encode %{
    __ rsb($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, lsl, $src2$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}

// (src1 >> src2) - src3 (arithmetic shift), register shift count.
instruct rsbsarI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (SubI (RShiftI src1 src2) src3));

  size(4);
  format %{ "RSB    $dst,$src3,$src1>>$src2" %}
  ins_encode %{
    __ rsb($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, asr, $src2$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

// (src1 >> src2) - src3 (arithmetic shift), immediate shift count.
instruct rsbsarI_reg_imm_reg(iRegI dst, iRegI src1, immU5 src2, iRegI src3) %{
  match(Set dst (SubI (RShiftI src1 src2) src3));

  size(4);
  format %{ "RSB    $dst,$src3,$src1>>$src2" %}
  ins_encode %{
    __ rsb($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, asr, $src2$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}

// (src1 >>> src2) - src3 (logical shift), register shift count.
instruct rsbshrI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (SubI (URShiftI src1 src2) src3));

  size(4);
  format %{ "RSB    $dst,$src3,$src1>>>$src2" %}
  ins_encode %{
    __ rsb($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, lsr, $src2$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

// (src1 >>> src2) - src3 (logical shift), immediate shift count.
instruct rsbshrI_reg_imm_reg(iRegI dst, iRegI src1, immU5 src2, iRegI src3) %{
  match(Set dst (SubI (URShiftI src1 src2) src3));

  size(4);
  format %{ "RSB    $dst,$src3,$src1>>>$src2" %}
  ins_encode %{
    __ rsb($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, lsr, $src2$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}
#endif
8212 
8213 // Immediate Subtraction
// SubI with an arithmetic-immediate subtrahend.
instruct subI_reg_aimmI(iRegI dst, iRegI src1, aimmI src2) %{
  match(Set dst (SubI src1 src2));

  size(4);
  format %{ "sub_32 $dst,$src1,$src2\t! int" %}
  ins_encode %{
    __ sub_32($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(ialu_reg_imm);
%}
8224 
// AddI with a negative immediate (aimmIneg) implemented as a subtraction of
// the negated constant: src1 + src2 == src1 - (-src2).
instruct subI_reg_immRotneg(iRegI dst, iRegI src1, aimmIneg src2) %{
  match(Set dst (AddI src1 src2));

  size(4);
  format %{ "sub_32 $dst,$src1,-($src2)\t! int" %}
  ins_encode %{
    __ sub_32($dst$$Register, $src1$$Register, -$src2$$constant);
  %}
  ins_pipe(ialu_reg_imm);
%}
8235 
#ifndef AARCH64
// immediate - register, ARM32 only: RSB (reverse subtract) computes
// $src1 - $src2 with the rotated immediate in the operand2 slot.
instruct subI_immRot_reg(iRegI dst, immIRot src1, iRegI src2) %{
  match(Set dst (SubI src1 src2));

  size(4);
  // Was "RSB    $dst,$src2,src1": the missing '$' printed the literal text
  // "src1" in disassembly output instead of the immediate's value.
  format %{ "RSB    $dst,$src2,$src1" %}
  ins_encode %{
    __ rsb($dst$$Register, $src2$$Register, $src1$$constant);
  %}
  ins_pipe(ialu_zero_reg);
%}
#endif
8248 
8249 // Register Subtraction
#ifdef AARCH64
// Long subtraction, single 64-bit SUB.
instruct subL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
  match(Set dst (SubL src1 src2));

  size(4);
  format %{ "SUB    $dst,$src1,$src2\t! long" %}
  ins_encode %{
    __ sub($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
#else
// ARM32: SUBS on the low words sets the borrow, SBC consumes it for the high
// words — so the flags are clobbered (KILL icc).
instruct subL_reg_reg(iRegL dst, iRegL src1, iRegL src2, flagsReg icc ) %{
  match(Set dst (SubL src1 src2));
  effect (KILL icc);

  size(8);
  format %{ "SUBS   $dst.lo,$src1.lo,$src2.lo\t! long\n\t"
            "SBC    $dst.hi,$src1.hi,$src2.hi" %}
  ins_encode %{
    __ subs($dst$$Register, $src1$$Register, $src2$$Register);
    __ sbc($dst$$Register->successor(), $src1$$Register->successor(), $src2$$Register->successor());
  %}
  ins_pipe(ialu_reg_reg);
%}
#endif
8276 
#ifdef AARCH64
// Immediate Subtraction
// SubL with an arithmetic-immediate subtrahend.
instruct subL_reg_aimm(iRegL dst, iRegL src1, aimmL src2) %{
  match(Set dst (SubL src1 src2));

  size(4);
  format %{ "SUB    $dst,$src1,$src2\t! long" %}
  ins_encode %{
    __ sub($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(ialu_reg_imm);
%}

// AddL with a negative immediate, emitted as SUB of the negated constant.
instruct subL_reg_immLneg(iRegL dst, iRegL src1, aimmLneg src2) %{
  match(Set dst (AddL src1 src2));

  size(4);
  format %{ "SUB    $dst,$src1,-($src2)\t! long" %}
  ins_encode %{
    __ sub($dst$$Register, $src1$$Register, -$src2$$constant);
  %}
  ins_pipe(ialu_reg_imm);
%}
#else
// TODO
#endif
8303 
#ifndef AARCH64
// Immediate Subtraction
// TODO: try immLRot2 instead, (0, $con$$constant) becomes
// (hi($con$$constant), lo($con$$constant)) becomes
// SubL with a low-word-only rotated immediate: SUBS sets the borrow for the
// low word, SBC ..., 0 propagates it into the high word.
instruct subL_reg_immRot(iRegL dst, iRegL src1, immLlowRot con, flagsReg icc) %{
  match(Set dst (SubL src1 con));
  effect (KILL icc);

  size(8);
  format %{ "SUB    $dst.lo,$src1.lo,$con\t! long\n\t"
            "SBC    $dst.hi,$src1.hi,0" %}
  ins_encode %{
    __ subs($dst$$Register, $src1$$Register, $con$$constant);
    __ sbc($dst$$Register->successor(), $src1$$Register->successor(), 0);
  %}
  ins_pipe(ialu_reg_imm);
%}

// Long negation
// 0 - src2 as RSBS (low, sets borrow) + RSC (high, consumes borrow).
instruct negL_reg_reg(iRegL dst, immL0 zero, iRegL src2, flagsReg icc) %{
  match(Set dst (SubL zero src2));
  effect (KILL icc);

  size(8);
  format %{ "RSBS   $dst.lo,$src2.lo,0\t! long\n\t"
            "RSC    $dst.hi,$src2.hi,0" %}
  ins_encode %{
    __ rsbs($dst$$Register, $src2$$Register, 0);
    __ rsc($dst$$Register->successor(), $src2$$Register->successor(), 0);
  %}
  ins_pipe(ialu_zero_reg);
%}
#endif // !AARCH64
8337 
8338 // Multiplication Instructions
8339 // Integer Multiplication
8340 // Register Multiplication
// Integer multiplication, register x register (MulI).
instruct mulI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (MulI src1 src2));

  size(4);
  format %{ "mul_32 $dst,$src1,$src2" %}
  ins_encode %{
    __ mul_32($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(imul_reg_reg);
%}
8351 
#ifdef AARCH64
// 64-bit multiply, single MUL instruction.
instruct mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));
  size(4);
  format %{ "MUL  $dst,$src1,$src2\t! long" %}
  ins_encode %{
    __ mul($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(imul_reg_reg);
%}
#else
// ARM32 64x64->64 multiply is built from three partial products via the
// expand rule in mulL_reg_reg below. The three helpers have no match rule —
// they exist only as expansion pieces and must run in the listed order.

// Step 1: dst.hi = src1.lo * src2.hi (cross product, high word only).
instruct mulL_lo1_hi2(iRegL dst, iRegL src1, iRegL src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(4);
  format %{ "MUL  $dst.hi,$src1.lo,$src2.hi\t! long" %}
  ins_encode %{
    __ mul($dst$$Register->successor(), $src1$$Register, $src2$$Register->successor());
  %}
  ins_pipe(imul_reg_reg);
%}

// Step 2: dst.hi += src1.hi * src2.lo (other cross product); clear dst.lo
// so step 3 can accumulate into the pair.
instruct mulL_hi1_lo2(iRegL dst, iRegL src1, iRegL src2) %{
  effect(USE_DEF dst, USE src1, USE src2);
  size(8);
  format %{ "MLA  $dst.hi,$src1.hi,$src2.lo,$dst.hi\t! long\n\t"
            "MOV  $dst.lo, 0"%}
  ins_encode %{
    __ mla($dst$$Register->successor(), $src1$$Register->successor(), $src2$$Register, $dst$$Register->successor());
    __ mov($dst$$Register, 0);
  %}
  ins_pipe(imul_reg_reg);
%}

// Step 3: accumulate the widened low-word product src1.lo * src2.lo into the
// {dst.lo, dst.hi} pair with UMLAL.
instruct mulL_lo1_lo2(iRegL dst, iRegL src1, iRegL src2) %{
  effect(USE_DEF dst, USE src1, USE src2);
  size(4);
  format %{ "UMLAL  $dst.lo,$dst.hi,$src1,$src2\t! long" %}
  ins_encode %{
    __ umlal($dst$$Register, $dst$$Register->successor(), $src1$$Register, $src2$$Register);
  %}
  ins_pipe(imul_reg_reg);
%}

// The matched MulL node: expands into the three partial-product steps above.
instruct mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  expand %{
    mulL_lo1_hi2(dst, src1, src2);
    mulL_hi1_lo2(dst, src1, src2);
    mulL_lo1_lo2(dst, src1, src2);
  %}
%}
#endif // !AARCH64
8405 
8406 // Integer Division
8407 // Register Division
#ifdef AARCH64
// Integer division: single hardware SDIV (32-bit form).
instruct divI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (DivI src1 src2));

  size(4);
  format %{ "SDIV    $dst,$src1,$src2\t! 32-bit" %}
  ins_encode %{
    __ sdiv_w($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(ialu_reg_reg); // FIXME
%}
#else
// ARM32: no assumed hardware divide — calls the shared idiv/irem stub.
// Fixed registers (R0/R1/R2, LR) match the stub's calling convention; the
// call clobbers flags, its argument registers and LR.
instruct divI_reg_reg(R1RegI dst, R0RegI src1, R2RegI src2, LRRegP lr, flagsReg ccr) %{
  match(Set dst (DivI src1 src2));
  effect( KILL ccr, KILL src1, KILL src2, KILL lr);
  ins_cost((2+71)*DEFAULT_COST);

  format %{ "DIV   $dst,$src1,$src2 ! call to StubRoutines::Arm::idiv_irem_entry()" %}
  ins_encode %{
    __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::runtime_call_type);
  %}
  ins_pipe(sdiv_reg_reg);
%}
#endif
8432 
8433 // Register Long Division
#ifdef AARCH64
// Long division: single 64-bit SDIV.
instruct divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  size(4);
  format %{ "SDIV    $dst,$src1,$src2" %}
  ins_encode %{
    __ sdiv($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(ialu_reg_reg); // FIXME
%}
#else
// ARM32: long division is a runtime call to SharedRuntime::ldiv; the fixed
// register pairs match the C calling convention, effect(CALL) tells the
// allocator this is a call site.
instruct divL_reg_reg(R0R1RegL dst, R2R3RegL src1, R0R1RegL src2) %{
  match(Set dst (DivL src1 src2));
  effect(CALL);
  ins_cost(DEFAULT_COST*71);
  format %{ "DIVL  $src1,$src2,$dst\t! long ! call to SharedRuntime::ldiv" %}
  ins_encode %{
    address target = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv);
    __ call(target, relocInfo::runtime_call_type);
  %}
  ins_pipe(divL_reg_reg);
%}
#endif
8458 
8459 // Integer Remainder
8460 // Register Remainder
#ifdef AARCH64
#ifdef TODO
// Fused multiply-subtract pattern for SubI(src1, MulI(src2, src3)); kept
// disabled under #ifdef TODO.
instruct msubI_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (SubI src1 (MulI src2 src3)));

  size(4);
  format %{ "MSUB    $dst,$src2,$src3,$src1\t! 32-bit\n\t" %}
  ins_encode %{
    __ msub_w($dst$$Register, $src2$$Register, $src3$$Register, $src1$$Register);
  %}
  ins_pipe(ialu_reg_reg); // FIXME
%}
#endif

// Integer remainder computed as src1 - (src1 / src2) * src2 using SDIV+MSUB.
instruct modI_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI temp) %{
  match(Set dst (ModI src1 src2));
  effect(TEMP temp);

  size(8);
  format %{ "SDIV    $temp,$src1,$src2\t! 32-bit\n\t"
            "MSUB    $dst,$src2,$temp,$src1\t! 32-bit\n\t" %}
  ins_encode %{
    __ sdiv_w($temp$$Register, $src1$$Register, $src2$$Register);
    __ msub_w($dst$$Register, $src2$$Register, $temp$$Register, $src1$$Register);
  %}
  ins_pipe(ialu_reg_reg); // FIXME
%}
#else
// ARM32: remainder via the same idiv_irem stub as divI_reg_reg; fixed
// registers per the stub's convention, flags/temp/LR clobbered by the call.
instruct modI_reg_reg(R0RegI dst, R0RegI src1, R2RegI src2, R1RegI temp, LRRegP lr, flagsReg ccr ) %{
  match(Set dst (ModI src1 src2));
  effect( KILL ccr, KILL temp, KILL src2, KILL lr);

  format %{ "MODI   $dst,$src1,$src2\t ! call to StubRoutines::Arm::idiv_irem_entry" %}
  ins_encode %{
    __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::runtime_call_type);
  %}
  ins_pipe(sdiv_reg_reg);
%}
#endif
8500 
8501 // Register Long Remainder
#ifdef AARCH64
// Long remainder as src1 - (src1 / src2) * src2 using 64-bit SDIV+MSUB.
instruct modL_reg_reg(iRegL dst, iRegL src1, iRegL src2, iRegL temp) %{
  match(Set dst (ModL src1 src2));
  effect(TEMP temp);

  size(8);
  format %{ "SDIV    $temp,$src1,$src2\n\t"
            "MSUB    $dst,$src2,$temp,$src1" %}
  ins_encode %{
    __ sdiv($temp$$Register, $src1$$Register, $src2$$Register);
    __ msub($dst$$Register, $src2$$Register, $temp$$Register, $src1$$Register);
  %}
  ins_pipe(ialu_reg_reg); // FIXME
%}
#else
// ARM32: long remainder is a runtime call to SharedRuntime::lrem, same
// register convention as divL_reg_reg.
instruct modL_reg_reg(R0R1RegL dst, R2R3RegL src1, R0R1RegL src2) %{
  match(Set dst (ModL src1 src2));
  effect(CALL);
  ins_cost(MEMORY_REF_COST); // FIXME
  format %{ "modL    $dst,$src1,$src2\t ! call to SharedRuntime::lrem" %}
  ins_encode %{
    address target = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
    __ call(target, relocInfo::runtime_call_type);
  %}
  ins_pipe(divL_reg_reg);
%}
#endif
8529 
8530 // Integer Shift Instructions
8531 
8532 // Register Shift Left
// Integer shift left by a register amount (LShiftI). AArch64 uses LSLV;
// ARM32 uses MOV with an LSL-shifted operand.
instruct shlI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (LShiftI src1 src2));

  size(4);
#ifdef AARCH64
  format %{ "LSLV   $dst,$src1,$src2\t! int" %}
  ins_encode %{
    __ lslv_w($dst$$Register, $src1$$Register, $src2$$Register);
  %}
#else
  format %{ "LSL  $dst,$src1,$src2 \n\t" %}
  ins_encode %{
    __ mov($dst$$Register, AsmOperand($src1$$Register, lsl, $src2$$Register));
  %}
#endif
  ins_pipe(ialu_reg_reg);
%}
8550 
8551 // Register Shift Left Immediate
// Integer shift left by a 5-bit immediate (0..31).
instruct shlI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{
  match(Set dst (LShiftI src1 src2));

  size(4);
#ifdef AARCH64
  format %{ "LSL_w  $dst,$src1,$src2\t! int" %}
  ins_encode %{
    __ _lsl($dst$$Register, $src1$$Register, $src2$$constant);
  %}
#else
  format %{ "LSL    $dst,$src1,$src2\t! int" %}
  ins_encode %{
    __ logical_shift_left($dst$$Register, $src1$$Register, $src2$$constant);
  %}
#endif
  ins_pipe(ialu_reg_imm);
%}
8569 
#ifndef AARCH64
// ARM32 64-bit left shift by a register count is assembled from the three
// expansion pieces below (see shlL_reg_reg): first compute the high word for
// both count<32 and count>=32 cases (overlap), then OR in the bits shifted
// out of the low word, then shift the low word itself.

// Merge step: dst.hi |= (src1.hi << src2).
instruct shlL_reg_reg_merge_hi(iRegL dst, iRegL src1, iRegI src2) %{
  effect(USE_DEF dst, USE src1, USE src2);
  size(4);
  format %{"OR  $dst.hi,$dst.hi,($src1.hi << $src2)"  %}
  ins_encode %{
    __ orr($dst$$Register->successor(), $dst$$Register->successor(), AsmOperand($src1$$Register->successor(), lsl, $src2$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Low-word step: dst.lo = src1.lo << src2.
instruct shlL_reg_reg_merge_lo(iRegL dst, iRegL src1, iRegI src2) %{
  effect(USE_DEF dst, USE src1, USE src2);
  size(4);
  format %{ "LSL  $dst.lo,$src1.lo,$src2 \n\t" %}
  ins_encode %{
    __ mov($dst$$Register, AsmOperand($src1$$Register, lsl, $src2$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

// High-word seed: SUBS computes src2-32 and sets flags; if the count is
// >= 32 (pl) the high word is src1.lo << (src2-32); if < 32 (mi) it is
// src1.lo >> (32-src2), i.e. the bits that spill over from the low word.
instruct shlL_reg_reg_overlap(iRegL dst, iRegL src1, iRegI src2, flagsReg ccr) %{
  effect(DEF dst, USE src1, USE src2, KILL ccr);
  size(16);
  format %{ "SUBS  $dst.hi,$src2,32 \n\t"
            "LSLpl $dst.hi,$src1.lo,$dst.hi \n\t"
            "RSBmi $dst.hi,$dst.hi,0 \n\t"
            "LSRmi $dst.hi,$src1.lo,$dst.hi" %}

  ins_encode %{
    // $src1$$Register and $dst$$Register->successor() can't be the same
    __ subs($dst$$Register->successor(), $src2$$Register, 32);
    __ mov($dst$$Register->successor(), AsmOperand($src1$$Register, lsl, $dst$$Register->successor()), pl);
    __ rsb($dst$$Register->successor(), $dst$$Register->successor(), 0, mi);
    __ mov($dst$$Register->successor(), AsmOperand($src1$$Register, lsr, $dst$$Register->successor()), mi);
  %}
  ins_pipe(ialu_reg_reg);
%}
#endif // !AARCH64
8609 
// Long shift-left by a variable count.  Single LSLV on AARCH64;
// on 32-bit ARM it expands into the three helper instructs above
// (overlap first, then the hi/lo merges).
instruct shlL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{
  match(Set dst (LShiftL src1 src2));

#ifdef AARCH64
  size(4);
  format %{ "LSLV  $dst,$src1,$src2\t! long" %}
  ins_encode %{
    __ lslv($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
#else
  expand %{
    flagsReg ccr;
    shlL_reg_reg_overlap(dst, src1, src2, ccr);
    shlL_reg_reg_merge_hi(dst, src1, src2);
    shlL_reg_reg_merge_lo(dst, src1, src2);
  %}
#endif
%}
8629 
8630 #ifdef AARCH64
// Long shift-left by a 6-bit immediate (single instruction on AARCH64).
instruct shlL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
  match(Set dst (LShiftL src1 src2));

  size(4);
  format %{ "LSL    $dst,$src1,$src2\t! long" %}
  ins_encode %{
    __ logical_shift_left($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(ialu_reg_imm);
%}
#else
// Register Shift Left Immediate
// Large-count case: the encoding assumes $src2 >= 32 (it uses
// $src2-32 as the shift), so dst.hi gets src1.lo shifted and
// dst.lo becomes zero.
instruct shlL_reg_imm6(iRegL dst, iRegL src1, immU6Big src2) %{
  match(Set dst (LShiftL src1 src2));

  size(8);
  format %{ "LSL   $dst.hi,$src1.lo,$src2-32\t! or mov if $src2==32\n\t"
            "MOV   $dst.lo, 0" %}
  ins_encode %{
    if ($src2$$constant == 32) {
      // Shift by exactly 32: plain register move into the high half.
      __ mov($dst$$Register->successor(), $src1$$Register);
    } else {
      __ mov($dst$$Register->successor(), AsmOperand($src1$$Register, lsl, $src2$$constant-32));
    }
    __ mov($dst$$Register, 0);
  %}
  ins_pipe(ialu_reg_imm);
%}

// Small-count case (0..31): three-instruction sequence combining both
// halves of the pair.
instruct shlL_reg_imm5(iRegL dst, iRegL src1, immU5 src2) %{
  match(Set dst (LShiftL src1 src2));

  size(12);
  format %{ "LSL   $dst.hi,$src1.lo,$src2\n\t"
            "OR    $dst.hi, $dst.hi, $src1.lo >> 32-$src2\n\t"
            "LSL   $dst.lo,$src1.lo,$src2" %}
  ins_encode %{
    // The order of the following 3 instructions matters: src1.lo and
    // dst.hi can't overlap but src.hi and dst.hi can.
    __ mov($dst$$Register->successor(), AsmOperand($src1$$Register->successor(), lsl, $src2$$constant));
    __ orr($dst$$Register->successor(), $dst$$Register->successor(), AsmOperand($src1$$Register, lsr, 32-$src2$$constant));
    __ mov($dst$$Register, AsmOperand($src1$$Register, lsl, $src2$$constant));
  %}
  ins_pipe(ialu_reg_imm);
%}
8676 #endif // !AARCH64
8677 
8678 // Register Arithmetic Shift Right
// Int arithmetic shift-right by a variable (register-held) count.
instruct sarI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (RShiftI src1 src2));
  size(4);
#ifdef AARCH64
  format %{ "ASRV   $dst,$src1,$src2\t! int" %}
  ins_encode %{
    __ asrv_w($dst$$Register, $src1$$Register, $src2$$Register);
  %}
#else
  format %{ "ASR    $dst,$src1,$src2\t! int" %}
  ins_encode %{
    __ mov($dst$$Register, AsmOperand($src1$$Register, asr, $src2$$Register));
  %}
#endif
  ins_pipe(ialu_reg_reg);
%}

// Register Arithmetic Shift Right Immediate
// Int arithmetic shift-right by a 5-bit immediate count.
instruct sarI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{
  match(Set dst (RShiftI src1 src2));

  size(4);
#ifdef AARCH64
  format %{ "ASR_w  $dst,$src1,$src2" %}
  ins_encode %{
    __ _asr_w($dst$$Register, $src1$$Register, $src2$$constant);
  %}
#else
  format %{ "ASR    $dst,$src1,$src2" %}
  ins_encode %{
    __ mov($dst$$Register, AsmOperand($src1$$Register, asr, $src2$$constant));
  %}
#endif
  ins_pipe(ialu_reg_imm);
%}
8714 
8715 #ifndef AARCH64
8716 // Register Shift Right Arithmetic Long
// Match-less helper (used only from the sarL_reg_reg expand):
// OR the logically shifted src1.lo into dst.lo.
instruct sarL_reg_reg_merge_lo(iRegL dst, iRegL src1, iRegI src2) %{
  effect(USE_DEF dst, USE src1, USE src2);
  size(4);
  format %{ "OR  $dst.lo,$dst.lo,($src1.lo >> $src2)"  %}
  ins_encode %{
    __ orr($dst$$Register, $dst$$Register, AsmOperand($src1$$Register, lsr, $src2$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Match-less helper: dst.hi = src1.hi arithmetically shifted right.
instruct sarL_reg_reg_merge_hi(iRegL dst, iRegL src1, iRegI src2) %{
  effect(USE_DEF dst, USE src1, USE src2);
  size(4);
  format %{ "ASR  $dst.hi,$src1.hi,$src2 \n\t" %}
  ins_encode %{
    __ mov($dst$$Register->successor(), AsmOperand($src1$$Register->successor(), asr, $src2$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Match-less helper: compute into dst.lo the bits of src1.hi that cross
// into the low word.  SUBS sets flags on (src2 - 32); the pl path
// handles counts >= 32 (ASR), the mi (RSB/LSL) path counts < 32.
instruct sarL_reg_reg_overlap(iRegL dst, iRegL src1, iRegI src2, flagsReg ccr) %{
  effect(DEF dst, USE src1, USE src2, KILL ccr);
  size(16);
  format %{ "SUBS  $dst.lo,$src2,32 \n\t"
            "ASRpl $dst.lo,$src1.hi,$dst.lo \n\t"
            "RSBmi $dst.lo,$dst.lo,0 \n\t"
            "LSLmi $dst.lo,$src1.hi,$dst.lo" %}

  ins_encode %{
    // $src1$$Register->successor() and $dst$$Register can't be the same
    __ subs($dst$$Register, $src2$$Register, 32);
    __ mov($dst$$Register, AsmOperand($src1$$Register->successor(), asr, $dst$$Register), pl);
    __ rsb($dst$$Register, $dst$$Register, 0, mi);
    __ mov($dst$$Register, AsmOperand($src1$$Register->successor(), lsl, $dst$$Register), mi);
  %}
  ins_pipe(ialu_reg_reg);
%}
8754 #endif // !AARCH64
8755 
// Long arithmetic shift-right by a variable count.  Single ASRV on
// AARCH64; on 32-bit ARM it expands into the three helper instructs
// above (overlap first, then the lo/hi merges).
instruct sarL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{
  match(Set dst (RShiftL src1 src2));

#ifdef AARCH64
  size(4);
  format %{ "ASRV  $dst,$src1,$src2\t! long" %}
  ins_encode %{
    __ asrv($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
#else
  expand %{
    flagsReg ccr;
    sarL_reg_reg_overlap(dst, src1, src2, ccr);
    sarL_reg_reg_merge_lo(dst, src1, src2);
    sarL_reg_reg_merge_hi(dst, src1, src2);
  %}
#endif
%}
8775 
// Register Arithmetic Shift Right Immediate
8777 #ifdef AARCH64
// Long arithmetic shift-right by a 6-bit immediate (single instruction).
instruct sarL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
  match(Set dst (RShiftL src1 src2));

  size(4);
  format %{ "ASR    $dst,$src1,$src2\t! long" %}
  ins_encode %{
    __ _asr($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(ialu_reg_imm);
%}
#else
// Large-count case: encoding assumes $src2 >= 32 ($src2-32 is used as
// the shift), so dst.lo gets src1.hi shifted and dst.hi is sign-filled.
instruct sarL_reg_imm6(iRegL dst, iRegL src1, immU6Big src2) %{
  match(Set dst (RShiftL src1 src2));

  size(8);
  format %{ "ASR   $dst.lo,$src1.hi,$src2-32\t! or mov if $src2==32\n\t"
            "ASR   $dst.hi,$src1.hi, $src2" %}
  ins_encode %{
    if ($src2$$constant == 32) {
      __ mov($dst$$Register, $src1$$Register->successor());
    } else{
      __ mov($dst$$Register, AsmOperand($src1$$Register->successor(), asr, $src2$$constant-32));
    }
    // NOTE(review): shift amount 0 with ASR is the A32 encoding of
    // ASR #32, i.e. dst.hi is filled with the sign of src1.hi — confirm
    // against the ARM ARM shifted-operand rules.
    __ mov($dst$$Register->successor(), AsmOperand($src1$$Register->successor(), asr, 0));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Small-count case (0..31).
instruct sarL_reg_imm5(iRegL dst, iRegL src1, immU5 src2) %{
  match(Set dst (RShiftL src1 src2));
  size(12);
  format %{ "LSR   $dst.lo,$src1.lo,$src2\n\t"
            "OR    $dst.lo, $dst.lo, $src1.hi << 32-$src2\n\t"
            "ASR   $dst.hi,$src1.hi,$src2" %}
  ins_encode %{
    // The order of the following 3 instructions matters: dst.lo is
    // written before src1.hi is read, so dst.lo and src1.hi can't
    // overlap.  NOTE(review): the original comment here was mirrored
    // from shlL_reg_imm5 ("src1.lo and dst.hi") — confirm the intended
    // constraint for this reversed sequence.
    __ mov($dst$$Register, AsmOperand($src1$$Register, lsr, $src2$$constant));
    __ orr($dst$$Register, $dst$$Register, AsmOperand($src1$$Register->successor(), lsl, 32-$src2$$constant));
    __ mov($dst$$Register->successor(), AsmOperand($src1$$Register->successor(), asr, $src2$$constant));
  %}
  ins_pipe(ialu_reg_imm);
%}
8822 #endif
8823 
8824 // Register Shift Right
// Int unsigned (logical) shift-right by a variable count.
instruct shrI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (URShiftI src1 src2));
  size(4);
#ifdef AARCH64
  format %{ "LSRV   $dst,$src1,$src2\t! int" %}
  ins_encode %{
    __ lsrv_w($dst$$Register, $src1$$Register, $src2$$Register);
  %}
#else
  format %{ "LSR    $dst,$src1,$src2\t! int" %}
  ins_encode %{
    __ mov($dst$$Register, AsmOperand($src1$$Register, lsr, $src2$$Register));
  %}
#endif
  ins_pipe(ialu_reg_reg);
%}

// Register Shift Right Immediate
// Int unsigned shift-right by a 5-bit immediate count.
instruct shrI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{
  match(Set dst (URShiftI src1 src2));

  size(4);
#ifdef AARCH64
  format %{ "LSR_w  $dst,$src1,$src2" %}
  ins_encode %{
    __ _lsr_w($dst$$Register, $src1$$Register, $src2$$constant);
  %}
#else
  format %{ "LSR    $dst,$src1,$src2" %}
  ins_encode %{
    __ mov($dst$$Register, AsmOperand($src1$$Register, lsr, $src2$$constant));
  %}
#endif
  ins_pipe(ialu_reg_imm);
%}
8860 
8861 #ifndef AARCH64
8862 // Register Shift Right
8863 instruct shrL_reg_reg_merge_lo(iRegL dst, iRegL src1, iRegI src2) %{
8864   effect(USE_DEF dst, USE src1, USE src2);
8865   size(4);
8866   format %{ "OR   $dst.lo,$dst,($src1.lo >>> $src2)"  %}
8867   ins_encode %{
8868     __ orr($dst$$Register, $dst$$Register, AsmOperand($src1$$Register, lsr, $src2$$Register));
8869   %}
8870   ins_pipe(ialu_reg_reg);
8871 %}
8872 
// Match-less helper: dst.hi = src1.hi logically shifted right.
instruct shrL_reg_reg_merge_hi(iRegL dst, iRegL src1, iRegI src2) %{
  effect(USE_DEF dst, USE src1, USE src2);
  size(4);
  format %{ "LSR  $dst.hi,$src1.hi,$src2 \n\t" %}
  ins_encode %{
    __ mov($dst$$Register->successor(), AsmOperand($src1$$Register->successor(), lsr, $src2$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Match-less helper: compute into dst.lo the bits of src1.hi that cross
// into the low word.  SUBS sets flags on (src2 - 32); the pl path
// handles counts >= 32 (LSR), the mi (RSB/LSL) path counts < 32.
instruct shrL_reg_reg_overlap(iRegL dst, iRegL src1, iRegI src2, flagsReg ccr) %{
  effect(DEF dst, USE src1, USE src2, KILL ccr);
  size(16);
  format %{ "SUBS  $dst,$src2,32 \n\t"
            "LSRpl $dst,$src1.hi,$dst \n\t"
            "RSBmi $dst,$dst,0 \n\t"
            "LSLmi $dst,$src1.hi,$dst" %}

  ins_encode %{
    // $src1$$Register->successor() and $dst$$Register can't be the same
    __ subs($dst$$Register, $src2$$Register, 32);
    __ mov($dst$$Register, AsmOperand($src1$$Register->successor(), lsr, $dst$$Register), pl);
    __ rsb($dst$$Register, $dst$$Register, 0, mi);
    __ mov($dst$$Register, AsmOperand($src1$$Register->successor(), lsl, $dst$$Register), mi);
  %}
  ins_pipe(ialu_reg_reg);
%}
8900 #endif // !AARCH64
8901 
// Long unsigned (logical) shift-right by a variable count.  Single LSRV
// on AARCH64; on 32-bit ARM it expands into the three helper instructs
// above (overlap first, then the lo/hi merges).
instruct shrL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{
  match(Set dst (URShiftL src1 src2));

#ifdef AARCH64
  size(4);
  format %{ "LSRV  $dst,$src1,$src2\t! long" %}
  ins_encode %{
    __ lsrv($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
#else
  expand %{
    flagsReg ccr;
    shrL_reg_reg_overlap(dst, src1, src2, ccr);
    shrL_reg_reg_merge_lo(dst, src1, src2);
    shrL_reg_reg_merge_hi(dst, src1, src2);
  %}
#endif
%}
8921 
8922 // Register Shift Right Immediate
8923 #ifdef AARCH64
// Long unsigned shift-right by a 6-bit immediate (single instruction).
instruct shrL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
  match(Set dst (URShiftL src1 src2));

  size(4);
  format %{ "LSR    $dst,$src1,$src2" %}
  ins_encode %{
    __ _lsr($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(ialu_reg_imm);
%}
#else
// Large-count case: encoding assumes $src2 >= 32 ($src2-32 is used as
// the shift), so dst.lo gets src1.hi shifted and dst.hi becomes zero.
instruct shrL_reg_imm6(iRegL dst, iRegL src1, immU6Big src2) %{
  match(Set dst (URShiftL src1 src2));

  size(8);
  format %{ "LSR   $dst.lo,$src1.hi,$src2-32\t! or mov if $src2==32\n\t"
            "MOV   $dst.hi, 0" %}
  ins_encode %{
    if ($src2$$constant == 32) {
      // Shift by exactly 32: plain register move of the high half.
      __ mov($dst$$Register, $src1$$Register->successor());
    } else {
      __ mov($dst$$Register, AsmOperand($src1$$Register->successor(), lsr, $src2$$constant-32));
    }
    __ mov($dst$$Register->successor(), 0);
  %}

  ins_pipe(ialu_reg_imm);
%}

// Small-count case (0..31).
instruct shrL_reg_imm5(iRegL dst, iRegL src1, immU5 src2) %{
  match(Set dst (URShiftL src1 src2));

  size(12);
  format %{ "LSR   $dst.lo,$src1.lo,$src2\n\t"
            "OR    $dst.lo, $dst.lo, $src1.hi << 32-$src2\n\t"
            "LSR   $dst.hi,$src1.hi,$src2" %}
  ins_encode %{
    // The order of the following 3 instructions matters: dst.lo is
    // written before src1.hi is read, so dst.lo and src1.hi can't
    // overlap.  NOTE(review): the original comment here was mirrored
    // from shlL_reg_imm5 ("src1.lo and dst.hi") — confirm the intended
    // constraint for this reversed sequence.
    __ mov($dst$$Register, AsmOperand($src1$$Register, lsr, $src2$$constant));
    __ orr($dst$$Register, $dst$$Register, AsmOperand($src1$$Register->successor(), lsl, 32-$src2$$constant));
    __ mov($dst$$Register->successor(), AsmOperand($src1$$Register->successor(), lsr, $src2$$constant));
  %}
  ins_pipe(ialu_reg_imm);
%}
8969 #endif // !AARCH64
8970 
8971 
// Unsigned shift-right of a pointer reinterpreted as an int (CastP2X),
// e.g. for hash/index computations on raw addresses.
instruct shrP_reg_imm5(iRegX dst, iRegP src1, immU5 src2) %{
  match(Set dst (URShiftI (CastP2X src1) src2));
  size(4);
  format %{ "LSR    $dst,$src1,$src2\t! Cast ptr $src1 to int and shift" %}
  ins_encode %{
    __ logical_shift_right($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(ialu_reg_imm);
%}
8981 
8982 //----------Floating Point Arithmetic Instructions-----------------------------
8983 
8984 //  Add float single precision
// Single-precision FP add (FADDS).
instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (AddF src1 src2));

  size(4);
  format %{ "FADDS  $dst,$src1,$src2" %}
  ins_encode %{
    __ add_float($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}

  ins_pipe(faddF_reg_reg);
%}

//  Add float double precision
// Double-precision FP add (FADDD).
instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (AddD src1 src2));

  size(4);
  format %{ "FADDD  $dst,$src1,$src2" %}
  ins_encode %{
    __ add_double($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}

  ins_pipe(faddD_reg_reg);
%}

//  Sub float single precision
// Single-precision FP subtract (FSUBS).
instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (SubF src1 src2));

  size(4);
  format %{ "FSUBS  $dst,$src1,$src2" %}
  ins_encode %{
    __ sub_float($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(faddF_reg_reg);
%}

//  Sub float double precision
// Double-precision FP subtract (FSUBD).
instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (SubD src1 src2));

  size(4);
  format %{ "FSUBD  $dst,$src1,$src2" %}
  ins_encode %{
    __ sub_double($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(faddD_reg_reg);
%}
9033 
9034 //  Mul float single precision
// Single-precision FP multiply (FMULS).
instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (MulF src1 src2));

  size(4);
  format %{ "FMULS  $dst,$src1,$src2" %}
  ins_encode %{
    __ mul_float($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}

  ins_pipe(fmulF_reg_reg);
%}

//  Mul float double precision
// Double-precision FP multiply (FMULD).
instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (MulD src1 src2));

  size(4);
  format %{ "FMULD  $dst,$src1,$src2" %}
  ins_encode %{
    __ mul_double($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}

  ins_pipe(fmulD_reg_reg);
%}

//  Div float single precision
// Single-precision FP divide (FDIVS).
instruct divF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (DivF src1 src2));

  size(4);
  format %{ "FDIVS  $dst,$src1,$src2" %}
  ins_encode %{
    __ div_float($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}

  ins_pipe(fdivF_reg_reg);
%}

//  Div float double precision
// Double-precision FP divide (FDIVD).
instruct divD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (DivD src1 src2));

  size(4);
  format %{ "FDIVD  $dst,$src1,$src2" %}
  ins_encode %{
    __ div_double($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}

  ins_pipe(fdivD_reg_reg);
%}
9085 
9086 //  Absolute float double precision
// Double-precision absolute value (FABSD).
instruct absD_reg(regD dst, regD src) %{
  match(Set dst (AbsD src));

  size(4);
  format %{ "FABSd  $dst,$src" %}
  ins_encode %{
    __ abs_double($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(faddD_reg);
%}

//  Absolute float single precision
// Single-precision absolute value (FABSS).
// NOTE(review): no size() here, unlike absD_reg above — confirm whether
// the omission is intentional.
instruct absF_reg(regF dst, regF src) %{
  match(Set dst (AbsF src));
  format %{ "FABSs  $dst,$src" %}
  ins_encode %{
    __ abs_float($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(faddF_reg);
%}

// Single-precision negate (FNEGS).
instruct negF_reg(regF dst, regF src) %{
  match(Set dst (NegF src));

  size(4);
  format %{ "FNEGs  $dst,$src" %}
  ins_encode %{
    __ neg_float($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(faddF_reg);
%}

// Double-precision negate (FNEGD).
instruct negD_reg(regD dst, regD src) %{
  match(Set dst (NegD src));

  format %{ "FNEGd  $dst,$src" %}
  ins_encode %{
    __ neg_double($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(faddD_reg);
%}
9128 
9129 //  Sqrt float double precision
// Single-precision square root.  The ideal graph only has a double
// SqrtD node, so the float form is matched as ConvD2F(SqrtD(ConvF2D x))
// and collapsed into a single FSQRTS.
instruct sqrtF_reg_reg(regF dst, regF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  size(4);
  format %{ "FSQRTS $dst,$src" %}
  ins_encode %{
    __ sqrt_float($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(fdivF_reg_reg);
%}

//  Sqrt float double precision
// Double-precision square root (FSQRTD).
instruct sqrtD_reg_reg(regD dst, regD src) %{
  match(Set dst (SqrtD src));

  size(4);
  format %{ "FSQRTD $dst,$src" %}
  ins_encode %{
    __ sqrt_double($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(fdivD_reg_reg);
%}
9152 
9153 //----------Logical Instructions-----------------------------------------------
9154 // And Instructions
9155 // Register And
// Int bitwise AND, register-register.
instruct andI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (AndI src1 src2));

  size(4);
  format %{ "and_32 $dst,$src1,$src2" %}
  ins_encode %{
    __ and_32($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

#ifndef AARCH64
// AND with a register-shifted operand: folds (src2 << src3) into the
// AND in one instruction (ARM32 only).
instruct andshlI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  size(4);
  format %{ "AND    $dst,$src1,$src2<<$src3" %}
  ins_encode %{
    __ andr($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsl, $src3$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}
#endif

// AND with an immediate-shifted operand: folds (src2 << src3).
instruct andshlI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  size(4);
  format %{ "and_32 $dst,$src1,$src2<<$src3" %}
  ins_encode %{
    __ and_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsl, $src3$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}

#ifndef AARCH64
// AND folding an arithmetic right shift of the second operand.
instruct andsarI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  size(4);
  format %{ "AND    $dst,$src1,$src2>>$src3" %}
  ins_encode %{
    __ andr($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, asr, $src3$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}
#endif

// AND folding an immediate arithmetic right shift of the second operand.
instruct andsarI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  size(4);
  format %{ "and_32 $dst,$src1,$src2>>$src3" %}
  ins_encode %{
    __ and_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, asr, $src3$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}

#ifndef AARCH64
// AND folding a logical right shift of the second operand.
instruct andshrI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  size(4);
  format %{ "AND    $dst,$src1,$src2>>>$src3" %}
  ins_encode %{
    __ andr($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsr, $src3$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}
#endif

// AND folding an immediate logical right shift of the second operand.
instruct andshrI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  size(4);
  format %{ "and_32 $dst,$src1,$src2>>>$src3" %}
  ins_encode %{
    __ and_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsr, $src3$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Immediate And
// AND with an encodable immediate (limmI).
instruct andI_reg_limm(iRegI dst, iRegI src1, limmI src2) %{
  match(Set dst (AndI src1 src2));

  size(4);
  format %{ "and_32 $dst,$src1,$src2\t! int" %}
  ins_encode %{
    __ and_32($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(ialu_reg_imm);
%}

#ifndef AARCH64
// AND with an immediate whose bitwise complement is encodable (limmIn):
// emitted as BIC dst, src1, ~imm (ARM32 only).
instruct andI_reg_limmn(iRegI dst, iRegI src1, limmIn src2) %{
  match(Set dst (AndI src1 src2));

  size(4);
  format %{ "bic    $dst,$src1,~$src2\t! int" %}
  ins_encode %{
    __ bic($dst$$Register, $src1$$Register, ~$src2$$constant);
  %}
  ins_pipe(ialu_reg_imm);
%}
#endif
9263 
9264 // Register And Long
// Long bitwise AND: one instruction on AARCH64, a two-instruction
// lo/hi pair on 32-bit ARM.
instruct andL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
  match(Set dst (AndL src1 src2));

  ins_cost(DEFAULT_COST);
#ifdef AARCH64
  size(4);
  format %{ "AND    $dst,$src1,$src2\t! long" %}
  ins_encode %{
    __ andr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
#else
  size(8);
  format %{ "AND    $dst,$src1,$src2\t! long" %}
  ins_encode %{
    __ andr($dst$$Register, $src1$$Register, $src2$$Register);
    __ andr($dst$$Register->successor(), $src1$$Register->successor(), $src2$$Register->successor());
  %}
#endif
  ins_pipe(ialu_reg_reg);
%}

#ifdef AARCH64
// Immediate And
// Long AND with an encodable logical immediate.
instruct andL_reg_limm(iRegL dst, iRegL src1, limmL src2) %{
  match(Set dst (AndL src1 src2));

  size(4);
  format %{ "AND    $dst,$src1,$src2\t! long" %}
  ins_encode %{
    __ andr($dst$$Register, $src1$$Register, (uintx)$src2$$constant);
  %}
  ins_pipe(ialu_reg_imm);
%}
#else
// TODO: try immLRot2 instead, (0, $con$$constant) becomes
// (hi($con$$constant), lo($con$$constant)) becomes
// Long AND with a rotate-encodable constant whose high word is zero
// (immLlowRot): the high half is ANDed with 0.
instruct andL_reg_immRot(iRegL dst, iRegL src1, immLlowRot con) %{
  match(Set dst (AndL src1 con));
  ins_cost(DEFAULT_COST);
  size(8);
  format %{ "AND    $dst,$src1,$con\t! long" %}
  ins_encode %{
    __ andr($dst$$Register, $src1$$Register, $con$$constant);
    __ andr($dst$$Register->successor(), $src1$$Register->successor(), 0);
  %}
  ins_pipe(ialu_reg_imm);
%}
#endif
9313 
9314 // Or Instructions
9315 // Register Or
// Int bitwise OR, register-register.
instruct orI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (OrI src1 src2));

  size(4);
  format %{ "orr_32 $dst,$src1,$src2\t! int" %}
  ins_encode %{
    __ orr_32($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

#ifndef AARCH64
// OR with a register-shifted operand: folds (src2 << src3) into the OR
// in one instruction (ARM32 only).
instruct orshlI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  size(4);
  format %{ "OR    $dst,$src1,$src2<<$src3" %}
  ins_encode %{
    __ orr($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsl, $src3$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}
#endif

// OR folding an immediate left shift of the second operand.
instruct orshlI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  size(4);
  format %{ "orr_32 $dst,$src1,$src2<<$src3" %}
  ins_encode %{
    __ orr_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsl, $src3$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}

#ifndef AARCH64
// OR folding an arithmetic right shift of the second operand.
instruct orsarI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  size(4);
  format %{ "OR    $dst,$src1,$src2>>$src3" %}
  ins_encode %{
    __ orr($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, asr, $src3$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}
#endif

// OR folding an immediate arithmetic right shift of the second operand.
instruct orsarI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  size(4);
  format %{ "orr_32 $dst,$src1,$src2>>$src3" %}
  ins_encode %{
    __ orr_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, asr, $src3$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}

#ifndef AARCH64
// OR folding a logical right shift of the second operand.
instruct orshrI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  size(4);
  format %{ "OR    $dst,$src1,$src2>>>$src3" %}
  ins_encode %{
    __ orr($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsr, $src3$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}
#endif

// OR folding an immediate logical right shift of the second operand.
instruct orshrI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  size(4);
  format %{ "orr_32 $dst,$src1,$src2>>>$src3" %}
  ins_encode %{
    __ orr_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsr, $src3$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Immediate Or
// OR with an encodable immediate (limmI).
instruct orI_reg_limm(iRegI dst, iRegI src1, limmI src2) %{
  match(Set dst (OrI src1 src2));

  size(4);
  format %{ "orr_32  $dst,$src1,$src2" %}
  ins_encode %{
    __ orr_32($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(ialu_reg_imm);
%}
9410 // TODO: orn_32 with limmIn
9411 
9412 // Register Or Long
// Long bitwise OR: one instruction on AARCH64, a lo/hi pair on ARM32.
instruct orL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  ins_cost(DEFAULT_COST);
#ifdef AARCH64
  size(4);
  format %{ "OR     $dst,$src1,$src2\t! long" %}
  ins_encode %{
    __ orr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
#else
  size(8);
  format %{ "OR     $dst.lo,$src1.lo,$src2.lo\t! long\n\t"
            "OR     $dst.hi,$src1.hi,$src2.hi" %}
  ins_encode %{
    __ orr($dst$$Register, $src1$$Register, $src2$$Register);
    __ orr($dst$$Register->successor(), $src1$$Register->successor(), $src2$$Register->successor());
  %}
#endif
  ins_pipe(ialu_reg_reg);
%}

#ifdef AARCH64
// Long OR with an encodable logical immediate.
instruct orL_reg_limm(iRegL dst, iRegL src1, limmL src2) %{
  match(Set dst (OrL src1 src2));

  size(4);
  format %{ "ORR    $dst,$src1,$src2\t! long" %}
  ins_encode %{
    __ orr($dst$$Register, $src1$$Register, (uintx)$src2$$constant);
  %}
  ins_pipe(ialu_reg_imm);
%}
#else
// TODO: try immLRot2 instead, (0, $con$$constant) becomes
// (hi($con$$constant), lo($con$$constant)) becomes
// Long OR with a rotate-encodable constant whose high word is zero
// (immLlowRot): the high half is ORed with 0.
instruct orL_reg_immRot(iRegL dst, iRegL src1, immLlowRot con) %{
  match(Set dst (OrL src1 con));
  ins_cost(DEFAULT_COST);
  size(8);
  format %{ "OR     $dst.lo,$src1.lo,$con\t! long\n\t"
            "OR     $dst.hi,$src1.hi,$con" %}
  ins_encode %{
    __ orr($dst$$Register, $src1$$Register, $con$$constant);
    __ orr($dst$$Register->successor(), $src1$$Register->successor(), 0);
  %}
  ins_pipe(ialu_reg_imm);
%}
#endif
9462 
#ifdef TODO
// NOTE: this whole section is compiled out (guarded by #ifdef TODO).
// Use SPRegP to match Rthread (TLS register) without spilling.
// Use store_ptr_RegP to match Rthread (TLS register) without spilling.
// Use sp_ptr_RegP to match Rthread (TLS register) without spilling.
instruct orI_reg_castP2X(iRegI dst, iRegI src1, sp_ptr_RegP src2) %{
  match(Set dst (OrI src1 (CastP2X src2)));
  size(4);
  format %{ "OR     $dst,$src1,$src2" %}
  ins_encode %{
    __ orr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
#endif
9477 
9478 // Xor Instructions
9479 // Register Xor
// Int bitwise XOR, register-register.
instruct xorI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (XorI src1 src2));

  size(4);
  format %{ "eor_32 $dst,$src1,$src2" %}
  ins_encode %{
    __ eor_32($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

#ifndef AARCH64
// XOR with a register-shifted operand: folds (src2 << src3) into the
// EOR in one instruction (ARM32 only).
instruct xorshlI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  size(4);
  format %{ "XOR    $dst,$src1,$src2<<$src3" %}
  ins_encode %{
    __ eor($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsl, $src3$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}
#endif

// XOR folding an immediate left shift of the second operand.
instruct xorshlI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  size(4);
  format %{ "eor_32 $dst,$src1,$src2<<$src3" %}
  ins_encode %{
    __ eor_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsl, $src3$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}

#ifndef AARCH64
// XOR folding an arithmetic right shift of the second operand.
instruct xorsarI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  size(4);
  format %{ "XOR    $dst,$src1,$src2>>$src3" %}
  ins_encode %{
    __ eor($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, asr, $src3$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}
#endif

// XOR folding an immediate arithmetic right shift of the second operand.
instruct xorsarI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  size(4);
  format %{ "eor_32 $dst,$src1,$src2>>$src3" %}
  ins_encode %{
    __ eor_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, asr, $src3$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}

#ifndef AARCH64
// XOR folding a logical right shift of the second operand.
instruct xorshrI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  size(4);
  format %{ "XOR    $dst,$src1,$src2>>>$src3" %}
  ins_encode %{
    __ eor($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsr, $src3$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}
#endif

// XOR folding an immediate logical right shift of the second operand.
instruct xorshrI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  size(4);
  format %{ "eor_32 $dst,$src1,$src2>>>$src3" %}
  ins_encode %{
    __ eor_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsr, $src3$$constant));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Immediate Xor
// XOR with an encodable immediate (limmI).
instruct xorI_reg_imm(iRegI dst, iRegI src1, limmI src2) %{
  match(Set dst (XorI src1 src2));

  size(4);
  format %{ "eor_32 $dst,$src1,$src2" %}
  ins_encode %{
    __ eor_32($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(ialu_reg_imm);
%}
9574 
// Register Xor Long
// dst = src1 ^ src2 (long). One EOR on AArch64; on ARM32 the 64-bit value
// lives in a register pair, so low and high halves are XORed separately
// (->successor() is the high-half register of the pair).
instruct xorL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));
  ins_cost(DEFAULT_COST);
#ifdef AARCH64
  size(4);
  format %{ "XOR     $dst,$src1,$src2\t! long" %}
  ins_encode %{
    __ eor($dst$$Register, $src1$$Register, $src2$$Register);
  %}
#else
  size(8);
  format %{ "XOR     $dst.hi,$src1.hi,$src2.hi\t! long\n\t"
            "XOR     $dst.lo,$src1.lo,$src2.lo\t! long" %}
  ins_encode %{
    __ eor($dst$$Register, $src1$$Register, $src2$$Register);
    __ eor($dst$$Register->successor(), $src1$$Register->successor(), $src2$$Register->successor());
  %}
#endif
  ins_pipe(ialu_reg_reg);
%}

#ifdef AARCH64
// dst = src1 ^ con for an encodable 64-bit logical immediate.
instruct xorL_reg_limmL(iRegL dst, iRegL src1, limmL con) %{
  match(Set dst (XorL src1 con));
  ins_cost(DEFAULT_COST);
  size(4);
  format %{ "EOR     $dst,$src1,$con\t! long" %}
  ins_encode %{
    __ eor($dst$$Register, $src1$$Register, (uintx)$con$$constant);
  %}
  ins_pipe(ialu_reg_imm);
%}
#else
// TODO: try immLRot2 instead, (0, $con$$constant) becomes
// (hi($con$$constant), lo($con$$constant)) becomes
// ARM32: XOR a rotate-encodable immediate into the low half; the high half
// is XORed with 0 (i.e. copied), since immLlowRot constants fit in 32 bits.
instruct xorL_reg_immRot(iRegL dst, iRegL src1, immLlowRot con) %{
  match(Set dst (XorL src1 con));
  ins_cost(DEFAULT_COST);
  size(8);
  format %{ "XOR     $dst.hi,$src1.hi,$con\t! long\n\t"
            "XOR     $dst.lo,$src1.lo,0\t! long" %}
  ins_encode %{
    __ eor($dst$$Register, $src1$$Register, $con$$constant);
    __ eor($dst$$Register->successor(), $src1$$Register->successor(), 0);
  %}
  ins_pipe(ialu_reg_imm);
%}
#endif // AARCH64
9624 
//----------Convert to Boolean-------------------------------------------------
// dst = (src != 0) ? 1 : 0 for an int source. Clobbers the condition flags.
instruct convI2B( iRegI dst, iRegI src, flagsReg ccr ) %{
  match(Set dst (Conv2B src));
  effect(KILL ccr);
#ifdef AARCH64
  size(8);
  ins_cost(DEFAULT_COST*2);
  format %{ "cmp_32 $src,ZR\n\t"
            "cset_w $dst, ne" %}
  ins_encode %{
    __ cmp_32($src$$Register, ZR);
    __ cset_w($dst$$Register, ne);
  %}
#else
  // ARM32 has no CSET: test src against itself, materialize 0, then
  // conditionally overwrite with 1 when the NE flag is set.
  size(12);
  ins_cost(DEFAULT_COST*2);
  format %{ "TST    $src,$src \n\t"
            "MOV    $dst, 0   \n\t"
            "MOV.ne $dst, 1" %}
  ins_encode %{ // FIXME: can do better?
    __ tst($src$$Register, $src$$Register);
    __ mov($dst$$Register, 0);
    __ mov($dst$$Register, 1, ne);
  %}
#endif
  ins_pipe(ialu_reg_ialu);
%}

// dst = (src != 0) ? 1 : 0 for a pointer source. Same shape as convI2B but
// uses full-width compare on AArch64. Clobbers the condition flags.
instruct convP2B( iRegI dst, iRegP src, flagsReg ccr ) %{
  match(Set dst (Conv2B src));
  effect(KILL ccr);
#ifdef AARCH64
  size(8);
  ins_cost(DEFAULT_COST*2);
  format %{ "CMP    $src,ZR\n\t"
            "cset   $dst, ne" %}
  ins_encode %{
    __ cmp($src$$Register, ZR);
    __ cset($dst$$Register, ne);
  %}
#else
  size(12);
  ins_cost(DEFAULT_COST*2);
  format %{ "TST    $src,$src \n\t"
            "MOV    $dst, 0   \n\t"
            "MOV.ne $dst, 1" %}
  ins_encode %{
    __ tst($src$$Register, $src$$Register);
    __ mov($dst$$Register, 0);
    __ mov($dst$$Register, 1, ne);
  %}
#endif
  ins_pipe(ialu_reg_ialu);
%}
9679 
// dst = (p < q) ? -1 : 0 (all-ones mask when p < q, signed compare).
// Clobbers the condition flags.
instruct cmpLTMask_reg_reg( iRegI dst, iRegI p, iRegI q, flagsReg ccr ) %{
  match(Set dst (CmpLTMask p q));
  effect( KILL ccr );
#ifdef AARCH64
  size(8);
  ins_cost(DEFAULT_COST*2);
  format %{ "CMP_w   $p,$q\n\t"
            "CSETM_w $dst, lt" %}
  ins_encode %{
    __ cmp_w($p$$Register, $q$$Register);
    __ csetm_w($dst$$Register, lt);
  %}
#else
  // ARM32: materialize 0, then conditionally MVN 0 (= -1) when lt.
  ins_cost(DEFAULT_COST*3);
  format %{ "CMP    $p,$q\n\t"
            "MOV    $dst, #0\n\t"
            "MOV.lt $dst, #-1" %}
  ins_encode %{
    __ cmp($p$$Register, $q$$Register);
    __ mov($dst$$Register, 0);
    __ mvn($dst$$Register, 0, lt);
  %}
#endif
  ins_pipe(ialu_reg_reg_ialu);
%}

// Same as cmpLTMask_reg_reg but q is an encodable arithmetic immediate.
instruct cmpLTMask_reg_imm( iRegI dst, iRegI p, aimmI q, flagsReg ccr ) %{
  match(Set dst (CmpLTMask p q));
  effect( KILL ccr );
#ifdef AARCH64
  size(8);
  ins_cost(DEFAULT_COST*2);
  format %{ "CMP_w   $p,$q\n\t"
            "CSETM_w $dst, lt" %}
  ins_encode %{
    __ cmp_w($p$$Register, $q$$constant);
    __ csetm_w($dst$$Register, lt);
  %}
#else
  ins_cost(DEFAULT_COST*3);
  format %{ "CMP    $p,$q\n\t"
            "MOV    $dst, #0\n\t"
            "MOV.lt $dst, #-1" %}
  ins_encode %{
    __ cmp($p$$Register, $q$$constant);
    __ mov($dst$$Register, 0);
    __ mvn($dst$$Register, 0, lt);
  %}
#endif
  ins_pipe(ialu_reg_reg_ialu);
%}
9731 
#ifdef AARCH64
// dst = (p < q) ? x + y : x — the conditional-add form of the
// (CmpLTMask & y) + x idiom. Clobbers the condition flags.
instruct cadd_cmpLTMask3( iRegI dst, iRegI p, iRegI q, iRegI y, iRegI x, flagsReg ccr ) %{
  match(Set dst (AddI (AndI (CmpLTMask p q) y) x));
  effect( TEMP dst, KILL ccr );
  size(12);
  ins_cost(DEFAULT_COST*3);
  format %{ "CMP_w  $p,$q\n\t"
            "ADD_w  $dst,$y,$x\n\t"
            "CSEL_w $dst,$dst,$x,lt" %}
  ins_encode %{
    __ cmp_w($p$$Register, $q$$Register);
    __ add_w($dst$$Register, $y$$Register, $x$$Register);
    __ csel_w($dst$$Register, $dst$$Register, $x$$Register, lt);
  %}
  ins_pipe( cadd_cmpltmask );
%}
#else
// ARM32 in-place form: z += y when p < q (conditional ADD, no temp needed).
instruct cadd_cmpLTMask3( iRegI p, iRegI q, iRegI y, iRegI z, flagsReg ccr ) %{
  match(Set z (AddI (AndI (CmpLTMask p q) y) z));
  effect( KILL ccr );
  ins_cost(DEFAULT_COST*2);
  format %{ "CMP    $p,$q\n\t"
            "ADD.lt $z,$y,$z" %}
  ins_encode %{
    __ cmp($p$$Register, $q$$Register);
    __ add($z$$Register, $y$$Register, $z$$Register, lt);
  %}
  ins_pipe( cadd_cmpltmask );
%}
#endif

#ifdef AARCH64
// Same as cadd_cmpLTMask3 but q is an arithmetic immediate.
instruct cadd_cmpLTMask4( iRegI dst, iRegI p, aimmI q, iRegI y, iRegI x, flagsReg ccr ) %{
  match(Set dst (AddI (AndI (CmpLTMask p q) y) x));
  effect( TEMP dst, KILL ccr );
  size(12);
  ins_cost(DEFAULT_COST*3);
  format %{ "CMP_w  $p,$q\n\t"
            "ADD_w  $dst,$y,$x\n\t"
            "CSEL_w $dst,$dst,$x,lt" %}
  ins_encode %{
    __ cmp_w($p$$Register, $q$$constant);
    __ add_w($dst$$Register, $y$$Register, $x$$Register);
    __ csel_w($dst$$Register, $dst$$Register, $x$$Register, lt);
  %}
  ins_pipe( cadd_cmpltmask );
%}
#else
// FIXME: remove unused "dst"
instruct cadd_cmpLTMask4( iRegI dst, iRegI p, aimmI q, iRegI y, iRegI z, flagsReg ccr ) %{
  match(Set z (AddI (AndI (CmpLTMask p q) y) z));
  effect( KILL ccr );
  ins_cost(DEFAULT_COST*2);
  format %{ "CMP    $p,$q\n\t"
            "ADD.lt $z,$y,$z" %}
  ins_encode %{
    __ cmp($p$$Register, $q$$constant);
    __ add($z$$Register, $y$$Register, $z$$Register, lt);
  %}
  ins_pipe( cadd_cmpltmask );
%}
#endif // !AARCH64

#ifdef AARCH64
// Fused form where the compared value p-q is itself the addend:
// dst = (p < q) ? y + (p-q) : (p-q). SUBS both compares and computes p-q
// (note: p is overwritten with the difference).
instruct cadd_cmpLTMask( iRegI dst, iRegI p, iRegI q, iRegI y, flagsReg ccr ) %{
  match(Set dst (AddI (AndI (CmpLTMask p q) y) (SubI p q)));
  effect( TEMP dst, KILL ccr );
  size(12);
  ins_cost(DEFAULT_COST*3);
  format %{ "SUBS_w $p,$p,$q\n\t"
            "ADD_w  $dst,$y,$p\n\t"
            "CSEL_w $dst,$dst,$p,lt" %}
  ins_encode %{
    __ subs_w($p$$Register, $p$$Register, $q$$Register);
    __ add_w($dst$$Register, $y$$Register, $p$$Register);
    __ csel_w($dst$$Register, $dst$$Register, $p$$Register, lt);
  %}
  ins_pipe( cadd_cmpltmask ); // FIXME
%}
#else
instruct cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, flagsReg ccr ) %{
  match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)));
  effect( KILL ccr );
  ins_cost(DEFAULT_COST*2);
  format %{ "SUBS   $p,$p,$q\n\t"
            "ADD.lt $p,$y,$p" %}
  ins_encode %{
    __ subs($p$$Register, $p$$Register, $q$$Register);
    __ add($p$$Register, $y$$Register, $p$$Register, lt);
  %}
  ins_pipe( cadd_cmpltmask );
%}
#endif
9825 
//----------Arithmetic Conversion Instructions---------------------------------
// The conversions operations are all Alpha sorted.  Please keep it that way!

// Double -> float narrowing conversion.
instruct convD2F_reg(regF dst, regD src) %{
  match(Set dst (ConvD2F src));
  size(4);
  format %{ "FCVTSD  $dst,$src" %}
  ins_encode %{
    __ convert_d2f($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(fcvtD2F);
%}

// Convert a double to an int in a float register.
// If the double is a NAN, stuff a zero in instead.

#ifdef AARCH64
// Double -> int, truncating (FCVTZS saturates and maps NaN to 0, matching
// Java semantics directly).
instruct convD2I_reg_reg(iRegI dst, regD src) %{
  match(Set dst (ConvD2I src));
  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); // FIXME
  format %{ "FCVTZS_wd $dst, $src" %}
  ins_encode %{
    __ fcvtzs_wd($dst$$Register, $src$$FloatRegister);
  %}
  ins_pipe(fcvtD2I);
%}

// Double -> long, truncating.
instruct convD2L_reg_reg(iRegL dst, regD src) %{
  match(Set dst (ConvD2L src));
  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); // FIXME
  format %{ "FCVTZS_xd $dst, $src" %}
  ins_encode %{
    __ fcvtzs_xd($dst$$Register, $src$$FloatRegister);
  %}
  ins_pipe(fcvtD2L);
%}
#else
// ARM32: convert in a VFP temp then move the result to a core register.
instruct convD2I_reg_reg(iRegI dst, regD src, regF tmp) %{
  match(Set dst (ConvD2I src));
  effect( TEMP tmp );
  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); // FIXME
  format %{ "FTOSIZD  $tmp,$src\n\t"
            "FMRS     $dst, $tmp" %}
  ins_encode %{
    __ ftosizd($tmp$$FloatRegister, $src$$FloatRegister);
    __ fmrs($dst$$Register, $tmp$$FloatRegister);
  %}
  ins_pipe(fcvtD2I);
%}
#endif

// Convert a double to a long in a double register.
// If the double is a NAN, stuff a zero in instead.

#ifndef AARCH64
// Double to Long conversion
// ARM32 has no 64-bit FP->int convert; call the SharedRuntime::d2l stub.
// The argument is passed in R0:R1 (soft-float ABI) or D0 (hard-float ABI).
instruct convD2L_reg(R0R1RegL dst, regD src) %{
  match(Set dst (ConvD2L src));
  effect(CALL);
  ins_cost(MEMORY_REF_COST); // FIXME
  format %{ "convD2L    $dst,$src\t ! call to SharedRuntime::d2l" %}
  ins_encode %{
#ifndef __ABI_HARD__
    __ fmrrd($dst$$Register, $dst$$Register->successor(), $src$$FloatRegister);
#else
    if ($src$$FloatRegister != D0) {
      __ mov_double(D0, $src$$FloatRegister);
    }
#endif
    address target = CAST_FROM_FN_PTR(address, SharedRuntime::d2l);
    __ call(target, relocInfo::runtime_call_type);
  %}
  ins_pipe(fcvtD2L);
%}
#endif

// Float -> double widening conversion.
instruct convF2D_reg(regD dst, regF src) %{
  match(Set dst (ConvF2D src));
  size(4);
  format %{ "FCVTDS  $dst,$src" %}
  ins_encode %{
    __ convert_f2d($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(fcvtF2D);
%}
9911 
#ifdef AARCH64
// Float -> int, truncating.
instruct convF2I_reg_reg(iRegI dst, regF src) %{
  match(Set dst (ConvF2I src));
  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); // FIXME
  size(4);
  format %{ "FCVTZS_ws $dst, $src" %}
  ins_encode %{
    __ fcvtzs_ws($dst$$Register, $src$$FloatRegister);
  %}
  ins_pipe(fcvtF2I);
%}

// Float -> long, truncating.
instruct convF2L_reg_reg(iRegL dst, regF src) %{
  match(Set dst (ConvF2L src));
  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); // FIXME
  size(4);
  format %{ "FCVTZS_xs $dst, $src" %}
  ins_encode %{
    __ fcvtzs_xs($dst$$Register, $src$$FloatRegister);
  %}
  ins_pipe(fcvtF2L);
%}
#else
// ARM32: convert in a VFP temp then move the result to a core register.
instruct convF2I_reg_reg(iRegI dst, regF src, regF tmp) %{
  match(Set dst (ConvF2I src));
  effect( TEMP tmp );
  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); // FIXME
  size(8);
  format %{ "FTOSIZS  $tmp,$src\n\t"
            "FMRS     $dst, $tmp" %}
  ins_encode %{
    __ ftosizs($tmp$$FloatRegister, $src$$FloatRegister);
    __ fmrs($dst$$Register, $tmp$$FloatRegister);
  %}
  ins_pipe(fcvtF2I);
%}

// Float to Long conversion
// ARM32: call the SharedRuntime::f2l stub; the argument goes in R0
// (soft-float ABI) or S0 (hard-float ABI), the result comes back in R0:R1.
instruct convF2L_reg(R0R1RegL dst, regF src, R0RegI arg1) %{
  match(Set dst (ConvF2L src));
  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); // FIXME
  effect(CALL);
  format %{ "convF2L  $dst,$src\t! call to SharedRuntime::f2l" %}
  ins_encode %{
#ifndef __ABI_HARD__
    __ fmrs($arg1$$Register, $src$$FloatRegister);
#else
    if($src$$FloatRegister != S0) {
      __ mov_float(S0, $src$$FloatRegister);
    }
#endif
    address target = CAST_FROM_FN_PTR(address, SharedRuntime::f2l);
    __ call(target, relocInfo::runtime_call_type);
  %}
  ins_pipe(fcvtF2L);
%}
#endif
9969 
#ifdef AARCH64
// Int -> double (signed convert).
instruct convI2D_reg_reg(iRegI src, regD dst) %{
  match(Set dst (ConvI2D src));
  ins_cost(DEFAULT_COST + MEMORY_REF_COST); // FIXME
  size(4);
  format %{ "SCVTF_dw $dst,$src" %}
  ins_encode %{
      __ scvtf_dw($dst$$FloatRegister, $src$$Register);
  %}
  ins_pipe(fcvtI2D);
%}
#else
// ARM32: move the int into a VFP register, then convert in place.
instruct convI2D_reg_reg(iRegI src, regD_low dst) %{
  match(Set dst (ConvI2D src));
  ins_cost(DEFAULT_COST + MEMORY_REF_COST); // FIXME
  size(8);
  format %{ "FMSR     $dst,$src \n\t"
            "FSITOD   $dst $dst"%}
  ins_encode %{
      __ fmsr($dst$$FloatRegister, $src$$Register);
      __ fsitod($dst$$FloatRegister, $dst$$FloatRegister);
  %}
  ins_pipe(fcvtI2D);
%}
#endif

// Int -> float (signed convert); on ARM32 via FMSR + in-place FSITOS.
instruct convI2F_reg_reg( regF dst, iRegI src ) %{
  match(Set dst (ConvI2F src));
  ins_cost(DEFAULT_COST + MEMORY_REF_COST); // FIXME
#ifdef AARCH64
  size(4);
  format %{ "SCVTF_sw $dst,$src" %}
  ins_encode %{
      __ scvtf_sw($dst$$FloatRegister, $src$$Register);
  %}
#else
  size(8);
  format %{ "FMSR     $dst,$src \n\t"
            "FSITOS   $dst, $dst"%}
  ins_encode %{
      __ fmsr($dst$$FloatRegister, $src$$Register);
      __ fsitos($dst$$FloatRegister, $dst$$FloatRegister);
  %}
#endif
  ins_pipe(fcvtI2F);
%}

// Int -> long, sign-extending. On ARM32 the high half of the register pair
// is filled with copies of the sign bit (ASR #31).
instruct convI2L_reg(iRegL dst, iRegI src) %{
  match(Set dst (ConvI2L src));
#ifdef AARCH64
  size(4);
  format %{ "SXTW   $dst,$src\t! int->long" %}
  ins_encode %{
    __ sxtw($dst$$Register, $src$$Register);
  %}
#else
  size(8);
  format %{ "MOV    $dst.lo, $src \n\t"
            "ASR    $dst.hi,$src,31\t! int->long" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register);
    __ mov($dst$$Register->successor(), AsmOperand($src$$Register, asr, 31));
  %}
#endif
  ins_pipe(ialu_reg_reg);
%}

// Zero-extend convert int to long
// Matches (int & 0xFFFFFFFFL): a plain 32-bit move zero-extends on AArch64;
// on ARM32 the high half of the pair is cleared explicitly.
instruct convI2L_reg_zex(iRegL dst, iRegI src, immL_32bits mask ) %{
  match(Set dst (AndL (ConvI2L src) mask) );
#ifdef AARCH64
  size(4);
  format %{ "mov_w  $dst,$src\t! zero-extend int to long"  %}
  ins_encode %{
    __ mov_w($dst$$Register, $src$$Register);
  %}
#else
  size(8);
  format %{ "MOV    $dst.lo,$src.lo\t! zero-extend int to long\n\t"
            "MOV    $dst.hi, 0"%}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register);
    __ mov($dst$$Register->successor(), 0);
  %}
#endif
  ins_pipe(ialu_reg_reg);
%}
10057 
// Zero-extend long
// Matches (long & 0xFFFFFFFFL): keep the low 32 bits, clear the high ones.
instruct zerox_long(iRegL dst, iRegL src, immL_32bits mask ) %{
  match(Set dst (AndL src mask) );
#ifdef AARCH64
  size(4);
  format %{ "mov_w  $dst,$src\t! zero-extend long"  %}
  ins_encode %{
    __ mov_w($dst$$Register, $src$$Register);
  %}
#else
  size(8);
  format %{ "MOV    $dst.lo,$src.lo\t! zero-extend long\n\t"
            "MOV    $dst.hi, 0"%}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register);
    __ mov($dst$$Register->successor(), 0);
  %}
#endif
  ins_pipe(ialu_reg_reg);
%}

// Raw bit move: float register -> int register (no conversion).
instruct MoveF2I_reg_reg(iRegI dst, regF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST); // FIXME

  size(4);
  format %{ "FMRS   $dst,$src\t! MoveF2I" %}
  ins_encode %{
    __ fmrs($dst$$Register, $src$$FloatRegister);
  %}
  ins_pipe(iload_mem); // FIXME
%}

// Raw bit move: int register -> float register (no conversion).
instruct MoveI2F_reg_reg(regF dst, iRegI src) %{
  match(Set dst (MoveI2F src));
  ins_cost(MEMORY_REF_COST); // FIXME

  size(4);
  format %{ "FMSR   $dst,$src\t! MoveI2F" %}
  ins_encode %{
    __ fmsr($dst$$FloatRegister, $src$$Register);
  %}
  ins_pipe(iload_mem); // FIXME
%}

// Raw bit move: double register -> long. On ARM32 the double's two words
// land in the dst register pair via FMRRD.
instruct MoveD2L_reg_reg(iRegL dst, regD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST); // FIXME

  size(4);
#ifdef AARCH64
  format %{ "FMOV_xd  $dst,$src\t! MoveD2L" %}
  ins_encode %{
    __ fmov_xd($dst$$Register, $src$$FloatRegister);
  %}
#else
  format %{ "FMRRD    $dst,$src\t! MoveD2L" %}
  ins_encode %{
    __ fmrrd($dst$$Register, $dst$$Register->successor(), $src$$FloatRegister);
  %}
#endif
  ins_pipe(iload_mem); // FIXME
%}

// Raw bit move: long -> double register (pairwise FMDRR on ARM32).
instruct MoveL2D_reg_reg(regD dst, iRegL src) %{
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST); // FIXME

  size(4);
#ifdef AARCH64
  format %{ "FMOV_dx $dst,$src\t! MoveL2D" %}
  ins_encode %{
    __ fmov_dx($dst$$FloatRegister, $src$$Register);
  %}
#else
  format %{ "FMDRR   $dst,$src\t! MoveL2D" %}
  ins_encode %{
    __ fmdrr($dst$$FloatRegister, $src$$Register, $src$$Register->successor());
  %}
#endif
  ins_pipe(ialu_reg_reg); // FIXME
%}
10143 
10144 //-----------
10145 // Long to Double conversion
10146 
10147 #ifdef AARCH64
10148 instruct convL2D(regD dst, iRegL src) %{
10149   match(Set dst (ConvL2D src));
10150   ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); // FIXME
10151   size(4);
10152   format %{ "SCVTF_dx $dst, $src" %}
10153   ins_encode %{
10154     __ scvtf_dx($dst$$FloatRegister, $src$$Register);
10155   %}
10156   ins_pipe(fcvtL2D);
10157 %}
10158 
10159 instruct convL2F(regF dst, iRegL src) %{
10160   match(Set dst (ConvL2F src));
10161   ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); // FIXME
10162   size(4);
10163   format %{ "SCVTF_sx $dst, $src" %}
10164   ins_encode %{
10165     __ scvtf_sx($dst$$FloatRegister, $src$$Register);
10166   %}
10167   ins_pipe(fcvtL2F);
10168 %}
10169 #else
10170 // Magic constant, 0x43300000
10171 instruct loadConI_x43300000(iRegI dst) %{
10172   effect(DEF dst);
10173   size(8);
10174   format %{ "MOV_SLOW  $dst,0x43300000\t! 2^52" %}
10175   ins_encode %{
10176     __ mov_slow($dst$$Register, 0x43300000);
10177   %}
10178   ins_pipe(ialu_none);
10179 %}
10180 
10181 // Magic constant, 0x41f00000
10182 instruct loadConI_x41f00000(iRegI dst) %{
10183   effect(DEF dst);
10184   size(8);
10185   format %{ "MOV_SLOW  $dst, 0x41f00000\t! 2^32" %}
10186   ins_encode %{
10187     __ mov_slow($dst$$Register, 0x41f00000);
10188   %}
10189   ins_pipe(ialu_none);
10190 %}
10191 
10192 instruct loadConI_x0(iRegI dst) %{
10193   effect(DEF dst);
10194   size(4);
10195   format %{ "MOV  $dst, 0x0\t! 0" %}
10196   ins_encode %{
10197     __ mov($dst$$Register, 0);
10198   %}
10199   ins_pipe(ialu_none);
10200 %}
10201 
10202 // Construct a double from two float halves
10203 instruct regDHi_regDLo_to_regD(regD_low dst, regD_low src1, regD_low src2) %{
10204   effect(DEF dst, USE src1, USE src2);
10205   size(8);
10206   format %{ "FCPYS  $dst.hi,$src1.hi\n\t"
10207             "FCPYS  $dst.lo,$src2.lo" %}
10208   ins_encode %{
10209     __ fcpys($dst$$FloatRegister->successor(), $src1$$FloatRegister->successor());
10210     __ fcpys($dst$$FloatRegister, $src2$$FloatRegister);
10211   %}
10212   ins_pipe(faddD_reg_reg);
10213 %}
10214 
10215 #ifndef AARCH64
10216 // Convert integer in high half of a double register (in the lower half of
10217 // the double register file) to double
10218 instruct convI2D_regDHi_regD(regD dst, regD_low src) %{
10219   effect(DEF dst, USE src);
10220   size(4);
10221   format %{ "FSITOD  $dst,$src" %}
10222   ins_encode %{
10223     __ fsitod($dst$$FloatRegister, $src$$FloatRegister->successor());
10224   %}
10225   ins_pipe(fcvtLHi2D);
10226 %}
10227 #endif
10228 
10229 // Add float double precision
10230 instruct addD_regD_regD(regD dst, regD src1, regD src2) %{
10231   effect(DEF dst, USE src1, USE src2);
10232   size(4);
10233   format %{ "FADDD  $dst,$src1,$src2" %}
10234   ins_encode %{
10235     __ add_double($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
10236   %}
10237   ins_pipe(faddD_reg_reg);
10238 %}
10239 
10240 // Sub float double precision
10241 instruct subD_regD_regD(regD dst, regD src1, regD src2) %{
10242   effect(DEF dst, USE src1, USE src2);
10243   size(4);
10244   format %{ "FSUBD  $dst,$src1,$src2" %}
10245   ins_encode %{
10246     __ sub_double($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
10247   %}
10248   ins_pipe(faddD_reg_reg);
10249 %}
10250 
10251 // Mul float double precision
10252 instruct mulD_regD_regD(regD dst, regD src1, regD src2) %{
10253   effect(DEF dst, USE src1, USE src2);
10254   size(4);
10255   format %{ "FMULD  $dst,$src1,$src2" %}
10256   ins_encode %{
10257     __ mul_double($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
10258   %}
10259   ins_pipe(fmulD_reg_reg);
10260 %}
10261 
10262 instruct regL_to_regD(regD dst, iRegL src) %{
10263   // No match rule to avoid chain rule match.
10264   effect(DEF dst, USE src);
10265   ins_cost(MEMORY_REF_COST);
10266   size(4);
10267   format %{ "FMDRR   $dst,$src\t! regL to regD" %}
10268   ins_encode %{
10269     __ fmdrr($dst$$FloatRegister, $src$$Register, $src$$Register->successor());
10270   %}
10271   ins_pipe(ialu_reg_reg); // FIXME
10272 %}
10273 
10274 instruct regI_regI_to_regD(regD dst, iRegI src1, iRegI src2) %{
10275   // No match rule to avoid chain rule match.
10276   effect(DEF dst, USE src1, USE src2);
10277   ins_cost(MEMORY_REF_COST);
10278   size(4);
10279   format %{ "FMDRR   $dst,$src1,$src2\t! regI,regI to regD" %}
10280   ins_encode %{
10281     __ fmdrr($dst$$FloatRegister, $src1$$Register, $src2$$Register);
10282   %}
10283   ins_pipe(ialu_reg_reg); // FIXME
10284 %}
10285 
10286 instruct convL2D_reg_slow_fxtof(regD dst, iRegL src) %{
10287   match(Set dst (ConvL2D src));
10288   ins_cost(DEFAULT_COST*8 + MEMORY_REF_COST*6); // FIXME
10289 
10290   expand %{
10291     regD_low   tmpsrc;
10292     iRegI      ix43300000;
10293     iRegI      ix41f00000;
10294     iRegI      ix0;
10295     regD_low   dx43300000;
10296     regD       dx41f00000;
10297     regD       tmp1;
10298     regD_low   tmp2;
10299     regD       tmp3;
10300     regD       tmp4;
10301 
10302     regL_to_regD(tmpsrc, src);
10303 
10304     loadConI_x43300000(ix43300000);
10305     loadConI_x41f00000(ix41f00000);
10306     loadConI_x0(ix0);
10307 
10308     regI_regI_to_regD(dx43300000, ix0, ix43300000);
10309     regI_regI_to_regD(dx41f00000, ix0, ix41f00000);
10310 
10311     convI2D_regDHi_regD(tmp1, tmpsrc);
10312     regDHi_regDLo_to_regD(tmp2, dx43300000, tmpsrc);
10313     subD_regD_regD(tmp3, tmp2, dx43300000);
10314     mulD_regD_regD(tmp4, tmp1, dx41f00000);
10315     addD_regD_regD(dst, tmp3, tmp4);
10316   %}
10317 %}
10318 #endif // !AARCH64
10319 
// Long -> int truncation: keep the low 32 bits (the low register of the
// pair on ARM32; a 32-bit move on AArch64).
instruct convL2I_reg(iRegI dst, iRegL src) %{
  match(Set dst (ConvL2I src));
  size(4);
#ifdef AARCH64
  format %{ "MOV_w  $dst,$src\t! long->int" %}
  ins_encode %{
    __ mov_w($dst$$Register, $src$$Register);
  %}
#else
  format %{ "MOV    $dst,$src.lo\t! long->int" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register);
  %}
#endif
  ins_pipe(ialu_move_reg_I_to_L);
%}

#ifndef AARCH64
// Register Shift Right Immediate
// (long >> cnt) truncated to int, for 32 <= cnt <= 63: the result comes
// entirely from the high register of the pair, shifted by cnt-32 (a plain
// move when cnt == 32).
instruct shrL_reg_imm6_L2I(iRegI dst, iRegL src, immI_32_63 cnt) %{
  match(Set dst (ConvL2I (RShiftL src cnt)));
  size(4);
  format %{ "ASR    $dst,$src.hi,($cnt - 32)\t! long->int or mov if $cnt==32" %}
  ins_encode %{
    if ($cnt$$constant == 32) {
      __ mov($dst$$Register, $src$$Register->successor());
    } else {
      __ mov($dst$$Register, AsmOperand($src$$Register->successor(), asr, $cnt$$constant - 32));
    }
  %}
  ins_pipe(ialu_reg_imm);
%}
#endif
10353 
10354 
//----------Control Flow Instructions------------------------------------------
// Compare Instructions
// Compare Integers
// Signed 32-bit compare: sets the condition flags from op1 - op2.
instruct compI_iReg(flagsReg icc, iRegI op1, iRegI op2) %{
  match(Set icc (CmpI op1 op2));
  effect( DEF icc, USE op1, USE op2 );

  size(4);
  format %{ "cmp_32 $op1,$op2\t! int" %}
  ins_encode %{
    __ cmp_32($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_cconly_reg_reg);
%}

#ifdef _LP64
// Compare compressed pointers
// Narrow oops are 32-bit values, so an unsigned 32-bit compare suffices.
instruct compN_reg2(flagsRegU icc, iRegN op1, iRegN op2) %{
  match(Set icc (CmpN op1 op2));
  effect( DEF icc, USE op1, USE op2 );

  size(4);
  format %{ "cmp_32 $op1,$op2\t! int" %}
  ins_encode %{
    __ cmp_32($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_cconly_reg_reg);
%}
#endif

// Unsigned 32-bit compare.
instruct compU_iReg(flagsRegU icc, iRegI op1, iRegI op2) %{
  match(Set icc (CmpU op1 op2));

  size(4);
  format %{ "cmp_32 $op1,$op2\t! unsigned int" %}
  ins_encode %{
    __ cmp_32($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_cconly_reg_reg);
%}

// Compare against a negative immediate: CMN op1, -op2 sets the same flags
// as CMP op1, op2 when op2 is negative but -op2 is encodable.
instruct compI_iReg_immneg(flagsReg icc, iRegI op1, aimmIneg op2) %{
  match(Set icc (CmpI op1 op2));
  effect( DEF icc, USE op1 );

  size(4);
  format %{ "cmn_32 $op1,-$op2\t! int" %}
  ins_encode %{
    __ cmn_32($op1$$Register, -$op2$$constant);
  %}
  ins_pipe(ialu_cconly_reg_imm);
%}

// Signed compare against an encodable arithmetic immediate.
instruct compI_iReg_imm(flagsReg icc, iRegI op1, aimmI op2) %{
  match(Set icc (CmpI op1 op2));
  effect( DEF icc, USE op1 );

  size(4);
  format %{ "cmp_32 $op1,$op2\t! int" %}
  ins_encode %{
    __ cmp_32($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_cconly_reg_imm);
%}
10419 
// Set flags from op1 & op2 compared against zero (TST). AND is commutative,
// so the swapped operand order in the format string is display-only.
instruct testI_reg_reg( flagsReg_EQNELTGE icc, iRegI op1, iRegI op2, immI0 zero ) %{
  match(Set icc (CmpI (AndI op1 op2) zero));
  size(4);
  format %{ "tst_32 $op2,$op1" %}

  ins_encode %{
    __ tst_32($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_cconly_reg_reg_zero);
%}
10430 
10431 #ifndef AARCH64
10432 instruct testshlI_reg_reg_reg( flagsReg_EQNELTGE icc, iRegI op1, iRegI op2, iRegI op3, immI0 zero ) %{
10433   match(Set icc (CmpI (AndI op1 (LShiftI op2 op3)) zero));
10434   size(4);
10435   format %{ "TST   $op2,$op1<<$op3" %}
10436 
10437   ins_encode %{
10438     __ tst($op1$$Register, AsmOperand($op2$$Register, lsl, $op3$$Register));
10439   %}
10440   ins_pipe(ialu_cconly_reg_reg_zero);
10441 %}
10442 #endif
10443 
10444 instruct testshlI_reg_reg_imm( flagsReg_EQNELTGE icc, iRegI op1, iRegI op2, immU5 op3, immI0 zero ) %{
10445   match(Set icc (CmpI (AndI op1 (LShiftI op2 op3)) zero));
10446   size(4);
10447   format %{ "tst_32 $op2,$op1<<$op3" %}
10448 
10449   ins_encode %{
10450     __ tst_32($op1$$Register, AsmOperand($op2$$Register, lsl, $op3$$constant));
10451   %}
10452   ins_pipe(ialu_cconly_reg_reg_zero);
10453 %}
10454 
10455 #ifndef AARCH64
10456 instruct testsarI_reg_reg_reg( flagsReg_EQNELTGE icc, iRegI op1, iRegI op2, iRegI op3, immI0 zero ) %{
10457   match(Set icc (CmpI (AndI op1 (RShiftI op2 op3)) zero));
10458   size(4);
10459   format %{ "TST   $op2,$op1<<$op3" %}
10460 
10461   ins_encode %{
10462     __ tst($op1$$Register, AsmOperand($op2$$Register, asr, $op3$$Register));
10463   %}
10464   ins_pipe(ialu_cconly_reg_reg_zero);
10465 %}
10466 #endif
10467 
10468 instruct testsarI_reg_reg_imm( flagsReg_EQNELTGE icc, iRegI op1, iRegI op2, immU5 op3, immI0 zero ) %{
10469   match(Set icc (CmpI (AndI op1 (RShiftI op2 op3)) zero));
10470   size(4);
10471   format %{ "tst_32 $op2,$op1<<$op3" %}
10472 
10473   ins_encode %{
10474     __ tst_32($op1$$Register, AsmOperand($op2$$Register, asr, $op3$$constant));
10475   %}
10476   ins_pipe(ialu_cconly_reg_reg_zero);
10477 %}
10478 
10479 #ifndef AARCH64
10480 instruct testshrI_reg_reg_reg( flagsReg_EQNELTGE icc, iRegI op1, iRegI op2, iRegI op3, immI0 zero ) %{
10481   match(Set icc (CmpI (AndI op1 (URShiftI op2 op3)) zero));
10482   size(4);
10483   format %{ "TST   $op2,$op1<<$op3" %}
10484 
10485   ins_encode %{
10486     __ tst($op1$$Register, AsmOperand($op2$$Register, lsr, $op3$$Register));
10487   %}
10488   ins_pipe(ialu_cconly_reg_reg_zero);
10489 %}
10490 #endif
10491 
10492 instruct testshrI_reg_reg_imm( flagsReg_EQNELTGE icc, iRegI op1, iRegI op2, immU5 op3, immI0 zero ) %{
10493   match(Set icc (CmpI (AndI op1 (URShiftI op2 op3)) zero));
10494   size(4);
10495   format %{ "tst_32 $op2,$op1<<$op3" %}
10496 
10497   ins_encode %{
10498     __ tst_32($op1$$Register, AsmOperand($op2$$Register, lsr, $op3$$constant));
10499   %}
10500   ins_pipe(ialu_cconly_reg_reg_zero);
10501 %}
10502 
// Set flags from op1 & op2 (logical immediate) compared against zero.
instruct testI_reg_imm( flagsReg_EQNELTGE icc, iRegI op1, limmI op2, immI0 zero ) %{
  match(Set icc (CmpI (AndI op1 op2) zero));
  size(4);
  format %{ "tst_32 $op2,$op1" %}

  ins_encode %{
    __ tst_32($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_cconly_reg_imm_zero);
%}
10513 
10514 #ifdef AARCH64
// Signed long compare (AARCH64): one 64-bit CMP sets every condition in xcc.
instruct compL_reg_reg(flagsReg xcc, iRegL op1, iRegL op2)
%{
  match(Set xcc (CmpL op1 op2));
  effect( DEF xcc, USE op1, USE op2 );

  size(4);
  format %{ "CMP     $op1,$op2\t! long" %}
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_cconly_reg_reg);
%}

// Unsigned long compare (AARCH64): identical CMP encoding; the flagsRegU
// operand steers the matcher to the unsigned condition codes.
instruct compUL_iReg(flagsRegU xcc, iRegL op1, iRegL op2) %{
  match(Set xcc (CmpUL op1 op2));

  size(4);
  format %{ "CMP     $op1,$op2\t! unsigned long" %}
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_cconly_reg_reg);
%}
10538 #else
// 32-bit ARM: longs live in register pairs, so a compare is a full 64-bit
// subtract — SUBS on the low words then SBCS on the high words — into a
// scratch pair.  The resulting flags are only valid for LT/GE tests
// (flagsRegL_LTGE); other conditions use the EQNE/LEGT variants below.
instruct compL_reg_reg_LTGE(flagsRegL_LTGE xcc, iRegL op1, iRegL op2, iRegL tmp) %{
  match(Set xcc (CmpL op1 op2));
  effect( DEF xcc, USE op1, USE op2, TEMP tmp );

  size(8);
  format %{ "SUBS    $tmp,$op1.low,$op2.low\t\t! long\n\t"
            "SBCS    $tmp,$op1.hi,$op2.hi" %}
  ins_encode %{
    // successor() is the high half of the register pair.
    __ subs($tmp$$Register, $op1$$Register, $op2$$Register);
    __ sbcs($tmp$$Register->successor(), $op1$$Register->successor(), $op2$$Register->successor());
  %}
  ins_pipe(ialu_cconly_reg_reg);
%}

// Unsigned flavor of the 64-bit subtract-compare; flags usable for LT/GE
// (i.e. unsigned below / above-or-equal) tests only.
instruct compUL_reg_reg_LTGE(flagsRegUL_LTGE xcc, iRegL op1, iRegL op2, iRegL tmp) %{
  match(Set xcc (CmpUL op1 op2));
  effect(DEF xcc, USE op1, USE op2, TEMP tmp);

  size(8);
  format %{ "SUBS    $tmp,$op1.low,$op2.low\t\t! unsigned long\n\t"
            "SBCS    $tmp,$op1.hi,$op2.hi" %}
  ins_encode %{
    __ subs($tmp$$Register, $op1$$Register, $op2$$Register);
    __ sbcs($tmp$$Register->successor(), $op1$$Register->successor(), $op2$$Register->successor());
  %}
  ins_pipe(ialu_cconly_reg_reg);
%}
10566 #endif
10567 
10568 #ifdef AARCH64
// Signed long compare against an arithmetic immediate (AARCH64).
// NOTE(review): size(8) for a single CMP looks like a conservative
// over-estimate (ADLC sizes are upper bounds) — confirm before tightening.
instruct compL_reg_con(flagsReg xcc, iRegL op1, aimmL con) %{
  match(Set xcc (CmpL op1 con));
  effect( DEF xcc, USE op1, USE con );

  size(8);
  format %{ "CMP     $op1,$con\t\t! long"  %}
  ins_encode %{
    __ cmp($op1$$Register, $con$$constant);
  %}

  ins_pipe(ialu_cconly_reg_imm);
%}

// Unsigned long compare against an arithmetic immediate (AARCH64); same
// encoding, unsigned flags register.
instruct compUL_reg_con(flagsRegU xcc, iRegL op1, aimmL con) %{
  match(Set xcc (CmpUL op1 con));
  effect(DEF xcc, USE op1, USE con);

  size(8);
  format %{ "CMP     $op1,$con\t\t! unsigned long"  %}
  ins_encode %{
    __ cmp($op1$$Register, $con$$constant);
  %}

  ins_pipe(ialu_cconly_reg_imm);
%}
10594 #else
// 32-bit ARM long compare for EQ/NE only: TEQ the high halves, then
// (conditionally, if still equal) TEQ the low halves.  No scratch register
// is needed since TEQ writes flags only.
instruct compL_reg_reg_EQNE(flagsRegL_EQNE xcc, iRegL op1, iRegL op2) %{
  match(Set xcc (CmpL op1 op2));
  effect( DEF xcc, USE op1, USE op2 );

  size(8);
  format %{ "TEQ    $op1.hi,$op2.hi\t\t! long\n\t"
            "TEQ.eq $op1.lo,$op2.lo" %}
  ins_encode %{
    __ teq($op1$$Register->successor(), $op2$$Register->successor());
    // Second TEQ is predicated on eq, so flags survive when halves differ.
    __ teq($op1$$Register, $op2$$Register, eq);
  %}
  ins_pipe(ialu_cconly_reg_reg);
%}

// 32-bit ARM long compare for LE/GT: computed as the commuted subtraction
// (op2 - op1), so a GE/LT test of these flags answers LE/GT of op1 vs op2.
instruct compL_reg_reg_LEGT(flagsRegL_LEGT xcc, iRegL op1, iRegL op2, iRegL tmp) %{
  match(Set xcc (CmpL op1 op2));
  effect( DEF xcc, USE op1, USE op2, TEMP tmp );

  size(8);
  format %{ "SUBS    $tmp,$op2.low,$op1.low\t\t! long\n\t"
            "SBCS    $tmp,$op2.hi,$op1.hi" %}
  ins_encode %{
    __ subs($tmp$$Register, $op2$$Register, $op1$$Register);
    __ sbcs($tmp$$Register->successor(), $op2$$Register->successor(), $op1$$Register->successor());
  %}
  ins_pipe(ialu_cconly_reg_reg);
%}
10622 
// TODO: try immLRot2 instead, (0, $con$$constant) becomes
// (hi($con$$constant), lo($con$$constant)) becomes
// Signed long-vs-constant compare for LT/GE on 32-bit ARM: subtract the
// low-word constant, then SBCS the high half against 0 (immLlowRot means
// the constant's high word is zero).
instruct compL_reg_con_LTGE(flagsRegL_LTGE xcc, iRegL op1, immLlowRot con, iRegL tmp) %{
  match(Set xcc (CmpL op1 con));
  effect( DEF xcc, USE op1, USE con, TEMP tmp );

  size(8);
  format %{ "SUBS    $tmp,$op1.low,$con\t\t! long\n\t"
            "SBCS    $tmp,$op1.hi,0" %}
  ins_encode %{
    __ subs($tmp$$Register, $op1$$Register, $con$$constant);
    __ sbcs($tmp$$Register->successor(), $op1$$Register->successor(), 0);
  %}

  ins_pipe(ialu_cconly_reg_reg);
%}

// TODO: try immLRot2 instead, (0, $con$$constant) becomes
// (hi($con$$constant), lo($con$$constant)) becomes
// Signed long-vs-constant compare for EQ/NE: TEQ high half against 0, then
// conditionally TEQ low half against the constant.
instruct compL_reg_con_EQNE(flagsRegL_EQNE xcc, iRegL op1, immLlowRot con) %{
  match(Set xcc (CmpL op1 con));
  effect( DEF xcc, USE op1, USE con );

  size(8);
  format %{ "TEQ    $op1.hi,0\t\t! long\n\t"
            "TEQ.eq $op1.lo,$con" %}
  ins_encode %{
    __ teq($op1$$Register->successor(), 0);
    __ teq($op1$$Register, $con$$constant, eq);
  %}

  ins_pipe(ialu_cconly_reg_reg);
%}

// TODO: try immLRot2 instead, (0, $con$$constant) becomes
// (hi($con$$constant), lo($con$$constant)) becomes
// Signed long-vs-constant compare for LE/GT: reverse-subtract (con - op1)
// via RSBS/RSCS so a GE/LT flag test answers LE/GT.
instruct compL_reg_con_LEGT(flagsRegL_LEGT xcc, iRegL op1, immLlowRot con, iRegL tmp) %{
  match(Set xcc (CmpL op1 con));
  effect( DEF xcc, USE op1, USE con, TEMP tmp );

  size(8);
  format %{ "RSBS    $tmp,$op1.low,$con\t\t! long\n\t"
            "RSCS    $tmp,$op1.hi,0" %}
  ins_encode %{
    __ rsbs($tmp$$Register, $op1$$Register, $con$$constant);
    __ rscs($tmp$$Register->successor(), $op1$$Register->successor(), 0);
  %}

  ins_pipe(ialu_cconly_reg_reg);
%}
10673 
// Unsigned long compare for EQ/NE: TEQ high halves, then conditionally TEQ
// low halves (equality is the same signed or unsigned).
instruct compUL_reg_reg_EQNE(flagsRegUL_EQNE xcc, iRegL op1, iRegL op2) %{
  match(Set xcc (CmpUL op1 op2));
  effect(DEF xcc, USE op1, USE op2);

  size(8);
  format %{ "TEQ    $op1.hi,$op2.hi\t\t! unsigned long\n\t"
            "TEQ.eq $op1.lo,$op2.lo" %}
  ins_encode %{
    __ teq($op1$$Register->successor(), $op2$$Register->successor());
    __ teq($op1$$Register, $op2$$Register, eq);
  %}
  ins_pipe(ialu_cconly_reg_reg);
%}

// Unsigned long compare for LE/GT: commuted 64-bit subtract (op2 - op1)
// into a scratch pair, mirroring compL_reg_reg_LEGT.
instruct compUL_reg_reg_LEGT(flagsRegUL_LEGT xcc, iRegL op1, iRegL op2, iRegL tmp) %{
  match(Set xcc (CmpUL op1 op2));
  effect(DEF xcc, USE op1, USE op2, TEMP tmp);

  size(8);
  format %{ "SUBS    $tmp,$op2.low,$op1.low\t\t! unsigned long\n\t"
            "SBCS    $tmp,$op2.hi,$op1.hi" %}
  ins_encode %{
    __ subs($tmp$$Register, $op2$$Register, $op1$$Register);
    __ sbcs($tmp$$Register->successor(), $op2$$Register->successor(), $op1$$Register->successor());
  %}
  ins_pipe(ialu_cconly_reg_reg);
%}
10701 
// TODO: try immLRot2 instead, (0, $con$$constant) becomes
// (hi($con$$constant), lo($con$$constant)) becomes
// Unsigned long-vs-constant compare for LT/GE: SUBS low word against the
// constant, SBCS high word against 0 (immLlowRot: high word is zero).
instruct compUL_reg_con_LTGE(flagsRegUL_LTGE xcc, iRegL op1, immLlowRot con, iRegL tmp) %{
  match(Set xcc (CmpUL op1 con));
  effect(DEF xcc, USE op1, USE con, TEMP tmp);

  size(8);
  format %{ "SUBS    $tmp,$op1.low,$con\t\t! unsigned long\n\t"
            "SBCS    $tmp,$op1.hi,0" %}
  ins_encode %{
    __ subs($tmp$$Register, $op1$$Register, $con$$constant);
    __ sbcs($tmp$$Register->successor(), $op1$$Register->successor(), 0);
  %}

  ins_pipe(ialu_cconly_reg_reg);
%}

// TODO: try immLRot2 instead, (0, $con$$constant) becomes
// (hi($con$$constant), lo($con$$constant)) becomes
// Unsigned long-vs-constant compare for EQ/NE: TEQ high half with 0, then
// conditionally TEQ low half with the constant.
instruct compUL_reg_con_EQNE(flagsRegUL_EQNE xcc, iRegL op1, immLlowRot con) %{
  match(Set xcc (CmpUL op1 con));
  effect(DEF xcc, USE op1, USE con);

  size(8);
  format %{ "TEQ    $op1.hi,0\t\t! unsigned long\n\t"
            "TEQ.eq $op1.lo,$con" %}
  ins_encode %{
    __ teq($op1$$Register->successor(), 0);
    __ teq($op1$$Register, $con$$constant, eq);
  %}

  ins_pipe(ialu_cconly_reg_reg);
%}

// TODO: try immLRot2 instead, (0, $con$$constant) becomes
// (hi($con$$constant), lo($con$$constant)) becomes
// Unsigned long-vs-constant compare for LE/GT: reverse-subtract
// (con - op1) via RSBS/RSCS, mirroring compL_reg_con_LEGT.
instruct compUL_reg_con_LEGT(flagsRegUL_LEGT xcc, iRegL op1, immLlowRot con, iRegL tmp) %{
  match(Set xcc (CmpUL op1 con));
  effect(DEF xcc, USE op1, USE con, TEMP tmp);

  size(8);
  format %{ "RSBS    $tmp,$op1.low,$con\t\t! unsigned long\n\t"
            "RSCS    $tmp,$op1.hi,0" %}
  ins_encode %{
    __ rsbs($tmp$$Register, $op1$$Register, $con$$constant);
    __ rscs($tmp$$Register->successor(), $op1$$Register->successor(), 0);
  %}

  ins_pipe(ialu_cconly_reg_reg);
%}
10752 #endif
10753 
10754 /* instruct testL_reg_reg(flagsRegL xcc, iRegL op1, iRegL op2, immL0 zero) %{ */
10755 /*   match(Set xcc (CmpL (AndL op1 op2) zero)); */
10756 /*   ins_encode %{ */
10757 /*     __ stop("testL_reg_reg unimplemented"); */
10758 /*   %} */
10759 /*   ins_pipe(ialu_cconly_reg_reg); */
10760 /* %} */
10761 
10762 /* // useful for checking the alignment of a pointer: */
10763 /* instruct testL_reg_con(flagsRegL xcc, iRegL op1, immLlowRot con, immL0 zero) %{ */
10764 /*   match(Set xcc (CmpL (AndL op1 con) zero)); */
10765 /*   ins_encode %{ */
10766 /*     __ stop("testL_reg_con unimplemented"); */
10767 /*   %} */
10768 /*   ins_pipe(ialu_cconly_reg_reg); */
10769 /* %} */
10770 
// Unsigned int compare against an arithmetic immediate (aimmU31).
instruct compU_iReg_imm(flagsRegU icc, iRegI op1, aimmU31 op2 ) %{
  match(Set icc (CmpU op1 op2));

  size(4);
  format %{ "cmp_32 $op1,$op2\t! unsigned" %}
  ins_encode %{
    __ cmp_32($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_cconly_reg_imm);
%}

// Compare Pointers
instruct compP_iRegP(flagsRegP pcc, iRegP op1, iRegP op2 ) %{
  match(Set pcc (CmpP op1 op2));

  size(4);
  format %{ "CMP    $op1,$op2\t! ptr" %}
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_cconly_reg_reg);
%}

// Pointer compare against an immediate.  Only a relocation-free constant
// (in practice NULL) is legal — a relocated oop could move after the
// immediate is baked into the instruction; the assert enforces this.
instruct compP_iRegP_imm(flagsRegP pcc, iRegP op1, aimmP op2 ) %{
  match(Set pcc (CmpP op1 op2));

  size(4);
  format %{ "CMP    $op1,$op2\t! ptr" %}
  ins_encode %{
    assert($op2$$constant == 0 || _opnds[2]->constant_reloc() == relocInfo::none, "reloc in cmp?");
    __ cmp($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_cconly_reg_imm);
%}
10805 
10806 //----------Max and Min--------------------------------------------------------
10807 // Min Instructions
10808 // Conditional move for min
// Helper (no match rule): predicated move used by the MinI expansion —
// copies op1 into op2 when the flags say "less than".
instruct cmovI_reg_lt( iRegI op2, iRegI op1, flagsReg icc ) %{
  effect( USE_DEF op2, USE op1, USE icc );

  size(4);
  format %{ "MOV.lt  $op2,$op1\t! min" %}
  ins_encode %{
    __ mov($op2$$Register, $op1$$Register, lt);
  %}
  ins_pipe(ialu_reg_flags);
%}

// Min Register with Register.
// Expands to a compare followed by a conditional move; op2 doubles as the
// destination (USE_DEF in the cmov).
instruct minI_eReg(iRegI op1, iRegI op2) %{
  match(Set op2 (MinI op1 op2));
  ins_cost(DEFAULT_COST*2);
  expand %{
    flagsReg icc;
    compI_iReg(icc,op1,op2);
    cmovI_reg_lt(op2,op1,icc);
  %}
%}

// Max Instructions
// Helper (no match rule): predicated move for the MaxI expansion — copies
// op1 into op2 when the flags say "greater than".
instruct cmovI_reg_gt( iRegI op2, iRegI op1, flagsReg icc ) %{
  effect( USE_DEF op2, USE op1, USE icc );
  format %{ "MOV.gt  $op2,$op1\t! max" %}
  ins_encode %{
    __ mov($op2$$Register, $op1$$Register, gt);
  %}
  ins_pipe(ialu_reg_flags);
%}

// Max Register with Register
instruct maxI_eReg(iRegI op1, iRegI op2) %{
  match(Set op2 (MaxI op1 op2));
  ins_cost(DEFAULT_COST*2);
  expand %{
    flagsReg icc;
    compI_iReg(icc,op1,op2);
    cmovI_reg_gt(op2,op1,icc);
  %}
%}
10852 
10853 
10854 //----------Float Compares----------------------------------------------------
10855 // Compare floating, generate condition code
// Float compare producing integer condition codes.  On 32-bit ARM this
// takes two instructions: the VFP compare plus FMSTAT to copy the FP
// status flags into the integer CPSR (hence KILL fcc).
instruct cmpF_cc(flagsRegF fcc, flagsReg icc, regF src1, regF src2) %{
  match(Set icc (CmpF src1 src2));
  effect(KILL fcc);

#ifdef AARCH64
  size(4);
  format %{ "FCMP_s  $src1,$src2" %}
  ins_encode %{
    __ fcmp_s($src1$$FloatRegister, $src2$$FloatRegister);
  %}
#else
  size(8);
  format %{ "FCMPs  $src1,$src2\n\t"
            "FMSTAT" %}
  ins_encode %{
    __ fcmps($src1$$FloatRegister, $src2$$FloatRegister);
    __ fmstat();
  %}
#endif
  ins_pipe(faddF_fcc_reg_reg_zero);
%}

// Float compare against the constant 0.0, using the compare-with-zero
// encoding so no register holds the constant.
instruct cmpF0_cc(flagsRegF fcc, flagsReg icc, regF src1, immF0 src2) %{
  match(Set icc (CmpF src1 src2));
  effect(KILL fcc);

#ifdef AARCH64
  size(4);
  format %{ "FCMP0_s $src1" %}
  ins_encode %{
    __ fcmp0_s($src1$$FloatRegister);
  %}
#else
  size(8);
  format %{ "FCMPs  $src1,$src2\n\t"
            "FMSTAT" %}
  ins_encode %{
    __ fcmpzs($src1$$FloatRegister);
    __ fmstat();
  %}
#endif
  ins_pipe(faddF_fcc_reg_reg_zero);
%}
10899 
// Double compare producing integer condition codes; same structure as the
// float version (FMSTAT needed on 32-bit ARM to transfer FP flags).
instruct cmpD_cc(flagsRegF fcc, flagsReg icc, regD src1, regD src2) %{
  match(Set icc (CmpD src1 src2));
  effect(KILL fcc);

#ifdef AARCH64
  size(4);
  format %{ "FCMP_d $src1,$src2" %}
  ins_encode %{
    __ fcmp_d($src1$$FloatRegister, $src2$$FloatRegister);
  %}
#else
  size(8);
  format %{ "FCMPd  $src1,$src2 \n\t"
            "FMSTAT" %}
  ins_encode %{
    __ fcmpd($src1$$FloatRegister, $src2$$FloatRegister);
    __ fmstat();
  %}
#endif
  ins_pipe(faddD_fcc_reg_reg_zero);
%}

// Double compare against the constant 0.0 via the compare-with-zero form.
instruct cmpD0_cc(flagsRegF fcc, flagsReg icc, regD src1, immD0 src2) %{
  match(Set icc (CmpD src1 src2));
  effect(KILL fcc);

#ifdef AARCH64
  size(8);
  format %{ "FCMP0_d $src1" %}
  ins_encode %{
    __ fcmp0_d($src1$$FloatRegister);
  %}
#else
  size(8);
  format %{ "FCMPZd  $src1,$src2 \n\t"
            "FMSTAT" %}
  ins_encode %{
    __ fcmpzd($src1$$FloatRegister);
    __ fmstat();
  %}
#endif
  ins_pipe(faddD_fcc_reg_reg_zero);
%}
10943 
10944 #ifdef AARCH64
10945 // Compare floating, generate -1,0,1
// CmpF3 (AARCH64): materialize -1/0/1 from a float compare.  FCMP sets the
// flags, CSET makes dst 1 for '>', and CSINV overwrites with -1 (~ZR)
// unless '>=' holds — leaving 0 for '==' and -1 for '<' or unordered.
instruct cmpF_reg(iRegI dst, regF src1, regF src2, flagsReg icc) %{
  match(Set dst (CmpF3 src1 src2));
  // effect(KILL fcc); // nobody cares if flagsRegF is killed
  effect(KILL icc);
  ins_cost(DEFAULT_COST*3); // FIXME
  size(12);
  format %{ "FCMP_s $src1,$src2\n\t"
            "CSET   $dst, gt\n\t"
            "CSINV  $dst, $dst, ZR, ge" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ fcmp_s($src1$$FloatRegister, $src2$$FloatRegister);
    __ cset(dst, gt);            // 1 if '>', else 0
    __ csinv(dst, dst, ZR, ge);  // previous value if '>=', else -1
  %}
  ins_pipe( floating_cmp ); // FIXME
%}

// CmpD3 (AARCH64): same -1/0/1 materialization for doubles.
instruct cmpD_reg(iRegI dst, regD src1, regD src2, flagsReg icc) %{
  match(Set dst (CmpD3 src1 src2));
  // effect(KILL fcc); // nobody cares if flagsRegF is killed
  effect(KILL icc);
  ins_cost(DEFAULT_COST*3); // FIXME
  size(12);
  format %{ "FCMP_d $src1,$src2\n\t"
            "CSET   $dst, gt\n\t"
            "CSINV  $dst, $dst, ZR, ge" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ fcmp_d($src1$$FloatRegister, $src2$$FloatRegister);
    __ cset(dst, gt);            // 1 if '>', else 0
    __ csinv(dst, dst, ZR, ge);  // previous value if '>=', else -1
  %}
  ins_pipe( floating_cmp ); // FIXME
%}

// CmpF3 against 0.0f (AARCH64): compare-with-zero form, same CSET/CSINV
// sequence to produce -1/0/1.
instruct cmpF0_reg(iRegI dst, regF src1, immF0 src2, flagsReg icc) %{
  match(Set dst (CmpF3 src1 src2));
  // effect(KILL fcc); // nobody cares if flagsRegF is killed
  effect(KILL icc);
  ins_cost(DEFAULT_COST*3); // FIXME
  size(12);
  format %{ "FCMP0_s $src1\n\t"
            "CSET   $dst, gt\n\t"
            "CSINV  $dst, $dst, ZR, ge" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ fcmp0_s($src1$$FloatRegister);
    __ cset(dst, gt);            // 1 if '>', else 0
    __ csinv(dst, dst, ZR, ge);  // previous value if '>=', else -1
  %}
  ins_pipe( floating_cmp ); // FIXME
%}

// CmpD3 against 0.0 (AARCH64): compare-with-zero form for doubles.
instruct cmpD0_reg(iRegI dst, regD src1, immD0 src2, flagsReg icc) %{
  match(Set dst (CmpD3 src1 src2));
  // effect(KILL fcc); // nobody cares if flagsRegF is killed
  effect(KILL icc);
  ins_cost(DEFAULT_COST*3); // FIXME
  size(12);
  format %{ "FCMP0_d $src1\n\t"
            "CSET   $dst, gt\n\t"
            "CSINV  $dst, $dst, ZR, ge" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ fcmp0_d($src1$$FloatRegister);
    __ cset(dst, gt);            // 1 if '>', else 0
    __ csinv(dst, dst, ZR, ge);  // previous value if '>=', else -1
  %}
  ins_pipe( floating_cmp ); // FIXME
%}
11020 #else
11021 // Compare floating, generate -1,0,1
// CmpF3 (32-bit ARM): -1/0/1 from a float compare without touching the
// integer flags.  floating_cmp() is a MacroAssembler helper that turns the
// raw FPSCR bits (fetched via VMRS) into -1/0/1 with bit arithmetic — the
// format string documents the emitted sequence.
instruct cmpF_reg(iRegI dst, regF src1, regF src2, flagsRegF fcc) %{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL fcc);
  ins_cost(DEFAULT_COST*3+BRANCH_COST*3); // FIXME
  size(20);
  // same number of instructions as code using conditional moves but
  // doesn't kill integer condition register
  format %{ "FCMPs  $dst,$src1,$src2 \n\t"
            "VMRS   $dst, FPSCR \n\t"
            "OR     $dst, $dst, 0x08000000 \n\t"
            "EOR    $dst, $dst, $dst << 3 \n\t"
            "MOV    $dst, $dst >> 30" %}
  ins_encode %{
    __ fcmps($src1$$FloatRegister, $src2$$FloatRegister);
    __ floating_cmp($dst$$Register);
  %}
  ins_pipe( floating_cmp );
%}

// CmpF3 against 0.0f (32-bit ARM): compare-with-zero encoding, then the
// same FPSCR-to-integer trick.
instruct cmpF0_reg(iRegI dst, regF src1, immF0 src2, flagsRegF fcc) %{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL fcc);
  ins_cost(DEFAULT_COST*3+BRANCH_COST*3); // FIXME
  size(20);
  // same number of instructions as code using conditional moves but
  // doesn't kill integer condition register
  format %{ "FCMPZs $dst,$src1,$src2 \n\t"
            "VMRS   $dst, FPSCR \n\t"
            "OR     $dst, $dst, 0x08000000 \n\t"
            "EOR    $dst, $dst, $dst << 3 \n\t"
            "MOV    $dst, $dst >> 30" %}
  ins_encode %{
    __ fcmpzs($src1$$FloatRegister);
    __ floating_cmp($dst$$Register);
  %}
  ins_pipe( floating_cmp );
%}
11059 
// CmpD3 (32-bit ARM): double flavor of the FPSCR-based -1/0/1 sequence;
// see cmpF_reg for how floating_cmp() derives the result.
instruct cmpD_reg(iRegI dst, regD src1, regD src2, flagsRegF fcc) %{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL fcc);
  ins_cost(DEFAULT_COST*3+BRANCH_COST*3); // FIXME
  size(20);
  // same number of instructions as code using conditional moves but
  // doesn't kill integer condition register
  format %{ "FCMPd  $dst,$src1,$src2 \n\t"
            "VMRS   $dst, FPSCR \n\t"
            "OR     $dst, $dst, 0x08000000 \n\t"
            "EOR    $dst, $dst, $dst << 3 \n\t"
            "MOV    $dst, $dst >> 30" %}
  ins_encode %{
    __ fcmpd($src1$$FloatRegister, $src2$$FloatRegister);
    __ floating_cmp($dst$$Register);
  %}
  ins_pipe( floating_cmp );
%}

// CmpD3 against 0.0 (32-bit ARM): compare-with-zero encoding plus the
// FPSCR-based -1/0/1 conversion.
instruct cmpD0_reg(iRegI dst, regD src1, immD0 src2, flagsRegF fcc) %{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL fcc);
  ins_cost(DEFAULT_COST*3+BRANCH_COST*3); // FIXME
  size(20);
  // same number of instructions as code using conditional moves but
  // doesn't kill integer condition register
  format %{ "FCMPZd $dst,$src1,$src2 \n\t"
            "VMRS   $dst, FPSCR \n\t"
            "OR     $dst, $dst, 0x08000000 \n\t"
            "EOR    $dst, $dst, $dst << 3 \n\t"
            "MOV    $dst, $dst >> 30" %}
  ins_encode %{
    __ fcmpzd($src1$$FloatRegister);
    __ floating_cmp($dst$$Register);
  %}
  ins_pipe( floating_cmp );
%}
11097 #endif // !AARCH64
11098 
11099 //----------Branches---------------------------------------------------------
11100 // Jump
11101 // (compare 'operand indIndex' and 'instruct addP_reg_reg' above)
11102 // FIXME
// Table jump (switch): load the target address from the constant table at
// [constanttablebase + constantoffset + switch_val] and branch to it.  Three
// addressing strategies depending on whether the table offset is zero and
// whether it fits an indexed-load displacement.
instruct jumpXtnd(iRegX switch_val, iRegP tmp) %{
  match(Jump switch_val);
  effect(TEMP tmp);
  ins_cost(350);
  format %{  "ADD    $tmp, $constanttablebase, $switch_val\n\t"
             "LDR    $tmp,[$tmp + $constantoffset]\n\t"
             "BX     $tmp" %}
  size(20);
  ins_encode %{
    Register table_reg;
    Register label_reg = $tmp$$Register;
    if (constant_offset() == 0) {
      // Offset-free: index the table base directly with switch_val.
      table_reg = $constanttablebase;
      __ ldr(label_reg, Address(table_reg, $switch_val$$Register));
    } else {
      table_reg = $tmp$$Register;
      int offset = $constantoffset;
      if (is_memoryP(offset)) {
        // Offset fits a load displacement: add the index, load with offset.
        __ add(table_reg, $constanttablebase, $switch_val$$Register);
        __ ldr(label_reg, Address(table_reg, offset));
      } else {
        // Offset too large: materialize it, then index with switch_val.
        __ mov_slow(table_reg, $constantoffset);
        __ add(table_reg, $constanttablebase, table_reg);
        __ ldr(label_reg, Address(table_reg, $switch_val$$Register));
      }
    }
    __ jump(label_reg); // ldr + b better than ldr to PC for branch predictor?
    //    __ ldr(PC, Address($table$$Register, $switch_val$$Register));
  %}
  ins_pipe(ialu_reg_reg);
%}
11134 
11135 // // Direct Branch.
// Unconditional direct branch (Goto).
instruct branch(label labl) %{
  match(Goto);
  effect(USE labl);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "B     $labl" %}
  ins_encode %{
    __ b(*($labl$$label));
  %}
  ins_pipe(br);
%}

// Conditional Direct Branch
// Branch on signed integer flags; the condition comes from the cmpOp
// operand's cmpcode.
instruct branchCon(cmpOp cmp, flagsReg icc, label labl) %{
  match(If cmp icc);
  effect(USE labl);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "B$cmp   $icc,$labl" %}
  ins_encode %{
    __ b(*($labl$$label), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(br_cc);
%}
11162 
11163 #ifdef ARM
// Conditional branch on flags produced by the TST-style instructions above
// (flagsReg_EQNELTGE): the predicate restricts matching to the four
// conditions those flags can answer — eq, ne, lt, ge.
instruct branchCon_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, label labl) %{
  match(If cmp icc);
  effect(USE labl);
  predicate( _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne || _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "B$cmp   $icc,$labl" %}
  ins_encode %{
    __ b(*($labl$$label), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(br_cc);
%}
11177 #endif
11178 
11179 #ifdef AARCH64
// Compare-and-branch-on-zero, int (AARCH64): fuses "compare with 0" and the
// branch into a single CBZ/CBNZ; only eq/ne tests qualify.
instruct cbzI(cmpOp cmp, iRegI op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);
  predicate(_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
            _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne);
  size(4);
  ins_cost(BRANCH_COST);
  format %{ "CB{N}Z $op1, $labl\t! int $cmp" %}
  ins_encode %{
    // cbz_w/cbnz_w are the 32-bit-register forms.
    if ($cmp$$cmpcode == eq) {
      __ cbz_w($op1$$Register, *($labl$$label));
    } else {
      __ cbnz_w($op1$$Register, *($labl$$label));
    }
  %}
  ins_pipe(br_cc); // FIXME
%}

// Compare-and-branch-on-zero, pointer (AARCH64): CBZ/CBNZ against NULL.
instruct cbzP(cmpOpP cmp, iRegP op1, immP0 op2, label labl) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);
  predicate(_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
            _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne);
  size(4);
  ins_cost(BRANCH_COST);
  format %{ "CB{N}Z $op1, $labl\t! ptr $cmp" %}
  ins_encode %{
    if ($cmp$$cmpcode == eq) {
      __ cbz($op1$$Register, *($labl$$label));
    } else {
      __ cbnz($op1$$Register, *($labl$$label));
    }
  %}
  ins_pipe(br_cc); // FIXME
%}

// Compare-and-branch-on-zero, long (AARCH64): 64-bit CBZ/CBNZ.
instruct cbzL(cmpOpL cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);
  predicate(_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
            _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne);
  size(4);
  ins_cost(BRANCH_COST);
  format %{ "CB{N}Z $op1, $labl\t! long $cmp" %}
  ins_encode %{
    if ($cmp$$cmpcode == eq) {
      __ cbz($op1$$Register, *($labl$$label));
    } else {
      __ cbnz($op1$$Register, *($labl$$label));
    }
  %}
  ins_pipe(br_cc); // FIXME
%}
11233 #endif
11234 
// Conditional branch on unsigned integer flags.
instruct branchConU(cmpOpU cmp, flagsRegU icc, label labl) %{
  match(If cmp icc);
  effect(USE labl);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "B$cmp  $icc,$labl" %}
  ins_encode %{
    __ b(*($labl$$label), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(br_cc);
%}

// Conditional branch on pointer-compare flags.
instruct branchConP(cmpOpP cmp, flagsRegP pcc, label labl) %{
  match(If cmp pcc);
  effect(USE labl);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "B$cmp  $pcc,$labl" %}
  ins_encode %{
    __ b(*($labl$$label), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(br_cc);
%}
11260 
11261 #ifndef AARCH64
// 32-bit ARM long branches: each variant pairs with the compL_*/compUL_*
// instruction that produced flags valid only for its condition subset, and
// a predicate limits matching to exactly those BoolTests.

// Signed long branch, lt/ge only (flags from compL_reg_reg_LTGE et al.).
instruct branchConL_LTGE(cmpOpL cmp, flagsRegL_LTGE xcc, label labl) %{
  match(If cmp xcc);
  effect(USE labl);
  predicate( _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge );

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "B$cmp  $xcc,$labl" %}
  ins_encode %{
    __ b(*($labl$$label), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(br_cc);
%}

// Signed long branch, eq/ne only.
instruct branchConL_EQNE(cmpOpL cmp, flagsRegL_EQNE xcc, label labl) %{
  match(If cmp xcc);
  effect(USE labl);
  predicate( _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne );

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "B$cmp  $xcc,$labl" %}
  ins_encode %{
    __ b(*($labl$$label), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(br_cc);
%}

// Signed long branch, le/gt only; uses the commuting cmpOpL_commute since
// the underlying compare was performed with operands swapped.
instruct branchConL_LEGT(cmpOpL_commute cmp, flagsRegL_LEGT xcc, label labl) %{
  match(If cmp xcc);
  effect(USE labl);
  predicate( _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt || _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le );

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "B$cmp  $xcc,$labl" %}
  ins_encode %{
    __ b(*($labl$$label), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(br_cc);
%}

// Unsigned long branch, lt/ge only.
instruct branchConUL_LTGE(cmpOpUL cmp, flagsRegUL_LTGE xcc, label labl) %{
  match(If cmp xcc);
  effect(USE labl);
  predicate(_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "B$cmp  $xcc,$labl" %}
  ins_encode %{
    __ b(*($labl$$label), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(br_cc);
%}

// Unsigned long branch, eq/ne only.
instruct branchConUL_EQNE(cmpOpUL cmp, flagsRegUL_EQNE xcc, label labl) %{
  match(If cmp xcc);
  effect(USE labl);
  predicate(_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "B$cmp  $xcc,$labl" %}
  ins_encode %{
    __ b(*($labl$$label), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(br_cc);
%}

// Unsigned long branch, le/gt only, with the commuting compare operand.
instruct branchConUL_LEGT(cmpOpUL_commute cmp, flagsRegUL_LEGT xcc, label labl) %{
  match(If cmp xcc);
  effect(USE labl);
  predicate(_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt || _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "B$cmp  $xcc,$labl" %}
  ins_encode %{
    __ b(*($labl$$label), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(br_cc);
%}
11345 #endif
11346 
// Back-branch closing a counted loop; same encoding as branchCon, matched
// separately so the compiler can recognize loop ends.
instruct branchLoopEnd(cmpOp cmp, flagsReg icc, label labl) %{
  match(CountedLoopEnd cmp icc);
  effect(USE labl);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "B$cmp   $icc,$labl\t! Loop end" %}
  ins_encode %{
    __ b(*($labl$$label), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(br_cc);
%}
11359 
11360 // instruct branchLoopEndU(cmpOpU cmp, flagsRegU icc, label labl) %{
11361 //   match(CountedLoopEnd cmp icc);
11362 //   ins_pipe(br_cc);
11363 // %}
11364 
11365 // ============================================================================
11366 // Long Compare
11367 //
11368 // Currently we hold longs in 2 registers.  Comparing such values efficiently
11369 // is tricky.  The flavor of compare used depends on whether we are testing
11370 // for LT, LE, or EQ.  For a simple LT test we can check just the sign bit.
11371 // The GE test is the negated LT test.  The LE test can be had by commuting
11372 // the operands (yielding a GE test) and then negating; negate again for the
11373 // GT test.  The EQ test is done by ORcc'ing the high and low halves, and the
11374 // NE test is negated from that.
11375 
11376 // Due to a shortcoming in the ADLC, it mixes up expressions like:
11377 // (foo (CmpI (CmpL X Y) 0)) and (bar (CmpI (CmpL X 0L) 0)).  Note the
11378 // difference between 'Y' and '0L'.  The tree-matches for the CmpI sections
11379 // are collapsed internally in the ADLC's dfa-gen code.  The match for
11380 // (CmpI (CmpL X Y) 0) is silently replaced with (CmpI (CmpL X 0L) 0) and the
11381 // foo match ends up with the wrong leaf.  One fix is to not match both
11382 // reg-reg and reg-zero forms of long-compare.  This is unfortunate because
11383 // both forms beat the trinary form of long-compare and both are very useful
11384 // on Intel which has so few registers.
11385 
11386 // instruct branchCon_long(cmpOp cmp, flagsRegL xcc, label labl) %{
11387 //   match(If cmp xcc);
11388 //   ins_pipe(br_cc);
11389 // %}
11390 
11391 // Manifest a CmpL3 result in an integer register.  Very painful.
11392 // This is the test to avoid.
11393 #ifdef AARCH64
// CmpL3 (AARCH64): materialize -1/0/1 from a signed long compare using the
// same CMP + CSET + CSINV idiom as the float CmpF3 rules above.
instruct cmpL3_reg_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg ccr) %{
  match(Set dst (CmpL3 src1 src2));
  // effect(KILL fcc); // nobody cares if flagsRegF is killed
  effect(KILL ccr);
  ins_cost(DEFAULT_COST*3); // FIXME
  size(12);
  format %{ "CMP    $src1,$src2\n\t"
            "CSET   $dst, gt\n\t"
            "CSINV  $dst, $dst, ZR, ge" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ cmp($src1$$Register, $src2$$Register);
    __ cset(dst, gt);            // 1 if '>', else 0
    __ csinv(dst, dst, ZR, ge);  // previous value if '>=', else -1
  %}
  ins_pipe( ialu_cconly_reg_reg ); // FIXME
%}
11411 // TODO cmpL3_reg_imm
11412 #else
11413 instruct cmpL3_reg_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg ccr ) %{
11414   match(Set dst (CmpL3 src1 src2) );
11415   effect( KILL ccr );
11416   ins_cost(6*DEFAULT_COST); // FIXME
11417   size(32);
11418   format %{
11419       "CMP    $src1.hi, $src2.hi\t\t! long\n"
11420     "\tMOV.gt $dst, 1\n"
11421     "\tmvn.lt $dst, 0\n"
11422     "\tB.ne   done\n"
11423     "\tSUBS   $dst, $src1.lo, $src2.lo\n"
11424     "\tMOV.hi $dst, 1\n"
11425     "\tmvn.lo $dst, 0\n"
11426     "done:"     %}
11427   ins_encode %{
11428     Label done;
11429     __ cmp($src1$$Register->successor(), $src2$$Register->successor());
11430     __ mov($dst$$Register, 1, gt);
11431     __ mvn($dst$$Register, 0, lt);
11432     __ b(done, ne);
11433     __ subs($dst$$Register, $src1$$Register, $src2$$Register);
11434     __ mov($dst$$Register, 1, hi);
11435     __ mvn($dst$$Register, 0, lo);
11436     __ bind(done);
11437   %}
11438   ins_pipe(cmpL_reg);
11439 %}
11440 #endif
11441 
#ifndef AARCH64
// Conditional move
// Conditional long-to-long register moves, guarded on a long-compare
// condition held in xcc. Longs are register pairs on 32-bit ARM, so both
// halves are moved under the same condition. The three variants exist
// because the flags register class depends on which BoolTest was matched.
// Fix: the second format line previously printed "$dst" although the
// instruction writes the high half; it now prints "$dst.hi" to match
// the emitted code and the "$dst.lo" line above.
instruct cmovLL_reg_LTGE(cmpOpL cmp, flagsRegL_LTGE xcc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge );

  ins_cost(150);
  size(8);
  format %{ "MOV$cmp  $dst.lo,$src.lo\t! long\n\t"
            "MOV$cmp  $dst.hi,$src.hi" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
    __ mov($dst$$Register->successor(), $src$$Register->successor(), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

instruct cmovLL_reg_EQNE(cmpOpL cmp, flagsRegL_EQNE xcc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne );

  ins_cost(150);
  size(8);
  format %{ "MOV$cmp  $dst.lo,$src.lo\t! long\n\t"
            "MOV$cmp  $dst.hi,$src.hi" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
    __ mov($dst$$Register->successor(), $src$$Register->successor(), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

instruct cmovLL_reg_LEGT(cmpOpL_commute cmp, flagsRegL_LEGT xcc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt );

  ins_cost(150);
  size(8);
  format %{ "MOV$cmp  $dst.lo,$src.lo\t! long\n\t"
            "MOV$cmp  $dst.hi,$src.hi" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
    __ mov($dst$$Register->successor(), $src$$Register->successor(), (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}
11488 
// Conditional move of the long constant 0: both halves of the pair are
// cleared under the matched condition.
// Fix: the second format line previously printed "$dst" although the
// instruction writes the high half; it now prints "$dst.hi" to match
// the emitted code and the "$dst.lo" line above.
instruct cmovLL_imm_LTGE(cmpOpL cmp, flagsRegL_LTGE xcc, iRegL dst, immL0 src) %{
  match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge );
  ins_cost(140);
  size(8);
  format %{ "MOV$cmp  $dst.lo,0\t! long\n\t"
            "MOV$cmp  $dst.hi,0" %}
  ins_encode %{
    __ mov($dst$$Register, 0, (AsmCondition)($cmp$$cmpcode));
    __ mov($dst$$Register->successor(), 0, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}

instruct cmovLL_imm_EQNE(cmpOpL cmp, flagsRegL_EQNE xcc, iRegL dst, immL0 src) %{
  match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne );
  ins_cost(140);
  size(8);
  format %{ "MOV$cmp  $dst.lo,0\t! long\n\t"
            "MOV$cmp  $dst.hi,0" %}
  ins_encode %{
    __ mov($dst$$Register, 0, (AsmCondition)($cmp$$cmpcode));
    __ mov($dst$$Register->successor(), 0, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}

instruct cmovLL_imm_LEGT(cmpOpL_commute cmp, flagsRegL_LEGT xcc, iRegL dst, immL0 src) %{
  match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt );
  ins_cost(140);
  size(8);
  format %{ "MOV$cmp  $dst.lo,0\t! long\n\t"
            "MOV$cmp  $dst.hi,0" %}
  ins_encode %{
    __ mov($dst$$Register, 0, (AsmCondition)($cmp$$cmpcode));
    __ mov($dst$$Register->successor(), 0, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
#endif // !AARCH64
11531 
#ifndef AARCH64
// Conditional int register move guarded by a long-compare condition (xcc).
// One variant per flags register class (LTGE / EQNE / LEGT).
instruct cmovIL_reg_LTGE(cmpOpL cmp, flagsRegL_LTGE xcc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge );

  ins_cost(150);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

instruct cmovIL_reg_EQNE(cmpOpL cmp, flagsRegL_EQNE xcc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne );

  ins_cost(150);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

instruct cmovIL_reg_LEGT(cmpOpL_commute cmp, flagsRegL_LEGT xcc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt );

  ins_cost(150);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}
#endif // !AARCH64
11572 
#ifndef AARCH64
// Conditional move of a 16-bit int immediate (MOVW), guarded by a
// long-compare condition held in xcc.
instruct cmovIL_imm_LTGE(cmpOpL cmp, flagsRegL_LTGE xcc, iRegI dst, immI16 src) %{
  match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge );

  ins_cost(140);
  format %{ "MOVW$cmp  $dst,$src" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}

instruct cmovIL_imm_EQNE(cmpOpL cmp, flagsRegL_EQNE xcc, iRegI dst, immI16 src) %{
  match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne );

  ins_cost(140);
  format %{ "MOVW$cmp  $dst,$src" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}

instruct cmovIL_imm_LEGT(cmpOpL_commute cmp, flagsRegL_LEGT xcc, iRegI dst, immI16 src) %{
  match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt );

  ins_cost(140);
  format %{ "MOVW$cmp  $dst,$src" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
11609 
// Conditional pointer moves (register and zero-immediate source) guarded
// by a long-compare condition held in xcc.
instruct cmovPL_reg_LTGE(cmpOpL cmp, flagsRegL_LTGE xcc, iRegP dst, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge );

  ins_cost(150);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

instruct cmovPL_reg_EQNE(cmpOpL cmp, flagsRegL_EQNE xcc, iRegP dst, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne );

  ins_cost(150);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

instruct cmovPL_reg_LEGT(cmpOpL_commute cmp, flagsRegL_LEGT xcc, iRegP dst, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt );

  ins_cost(150);
  size(4);
  format %{ "MOV$cmp  $dst,$src" %}
  ins_encode %{
    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_reg);
%}

// Conditional move of the null pointer (immP0): emitted as MOVW with the
// constant value 0.
instruct cmovPL_imm_LTGE(cmpOpL cmp, flagsRegL_LTGE xcc, iRegP dst, immP0 src) %{
  match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge );

  ins_cost(140);
  format %{ "MOVW$cmp  $dst,$src" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}

instruct cmovPL_imm_EQNE(cmpOpL cmp, flagsRegL_EQNE xcc, iRegP dst, immP0 src) %{
  match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne );

  ins_cost(140);
  format %{ "MOVW$cmp  $dst,$src" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}

instruct cmovPL_imm_LEGT(cmpOpL_commute cmp, flagsRegL_LEGT xcc, iRegP dst, immP0 src) %{
  match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt );

  ins_cost(140);
  format %{ "MOVW$cmp  $dst,$src" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(ialu_imm);
%}
11684 
// Conditional float/double register moves (VFP FCPYS/FCPYD) guarded by a
// long-compare condition held in xcc.
instruct cmovFL_reg_LTGE(cmpOpL cmp, flagsRegL_LTGE xcc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge );
  ins_cost(150);
  size(4);
  format %{ "FCPYS$cmp $dst,$src" %}
  ins_encode %{
    __ fcpys($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_float_move);
%}

instruct cmovFL_reg_EQNE(cmpOpL cmp, flagsRegL_EQNE xcc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne );
  ins_cost(150);
  size(4);
  format %{ "FCPYS$cmp $dst,$src" %}
  ins_encode %{
    __ fcpys($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_float_move);
%}

instruct cmovFL_reg_LEGT(cmpOpL_commute cmp, flagsRegL_LEGT xcc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt );
  ins_cost(150);
  size(4);
  format %{ "FCPYS$cmp $dst,$src" %}
  ins_encode %{
    __ fcpys($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_float_move);
%}

instruct cmovDL_reg_LTGE(cmpOpL cmp, flagsRegL_LTGE xcc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge );

  ins_cost(150);
  size(4);
  format %{ "FCPYD$cmp $dst,$src" %}
  ins_encode %{
    __ fcpyd($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_float_move);
%}

instruct cmovDL_reg_EQNE(cmpOpL cmp, flagsRegL_EQNE xcc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne );

  ins_cost(150);
  size(4);
  format %{ "FCPYD$cmp $dst,$src" %}
  ins_encode %{
    __ fcpyd($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_float_move);
%}

instruct cmovDL_reg_LEGT(cmpOpL_commute cmp, flagsRegL_LEGT xcc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp xcc) (Binary dst src)));
  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le || _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::gt );

  ins_cost(150);
  size(4);
  format %{ "FCPYD$cmp $dst,$src" %}
  ins_encode %{
    __ fcpyd($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
  %}
  ins_pipe(int_conditional_float_move);
%}
#endif // !AARCH64
11760 
11761 // ============================================================================
11762 // Safepoint Instruction
11763 #ifdef AARCH64
11764 instruct safePoint_poll(iRegP poll, flagsReg icc, RtempRegP tmp) %{
11765   match(SafePoint poll);
11766   // The handler stub kills Rtemp
11767   effect(USE poll, KILL tmp, KILL icc);
11768 
11769   size(4);
11770   format %{ "LDR   ZR,[$poll]\t! Safepoint: poll for GC" %}
11771   ins_encode %{
11772     __ relocate(relocInfo::poll_type);
11773     __ ldr(ZR, Address($poll$$Register));
11774   %}
11775   ins_pipe(loadPollP);
11776 %}
11777 #else
11778 // rather than KILL R12, it would be better to use any reg as
11779 // TEMP. Can't do that at this point because it crashes the compiler
11780 instruct safePoint_poll(iRegP poll, R12RegI tmp, flagsReg icc) %{
11781   match(SafePoint poll);
11782   effect(USE poll, KILL tmp, KILL icc);
11783 
11784   size(4);
11785   format %{ "LDR   $tmp,[$poll]\t! Safepoint: poll for GC" %}
11786   ins_encode %{
11787     __ relocate(relocInfo::poll_type);
11788     __ ldr($tmp$$Register, Address($poll$$Register));
11789   %}
11790   ins_pipe(loadPollP);
11791 %}
11792 #endif
11793 
11794 
11795 // ============================================================================
11796 // Call Instructions
11797 // Call Java Static Instruction
11798 instruct CallStaticJavaDirect( method meth ) %{
11799   match(CallStaticJava);
11800   predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
11801   effect(USE meth);
11802 
11803   ins_cost(CALL_COST);
11804   format %{ "CALL,static ==> " %}
11805   ins_encode( Java_Static_Call( meth ), call_epilog );
11806   ins_pipe(simple_call);
11807 %}
11808 
// Call Java Static Instruction (method handle version)
// Same as CallStaticJavaDirect but brackets the call with preserve_SP /
// restore_SP for method-handle invokes, which may adjust SP.
instruct CallStaticJavaHandle( method meth ) %{
  match(CallStaticJava);
  predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
  effect(USE meth);
  // FP is saved by all callees (for interpreter stack correction).
  // We use it here for a similar purpose, in {preserve,restore}_FP.

  ins_cost(CALL_COST);
  format %{ "CALL,static/MethodHandle ==> " %}
  ins_encode( preserve_SP, Java_Static_Call( meth ), restore_SP, call_epilog );
  ins_pipe(simple_call);
%}
11822 
// Call Java Dynamic Instruction
// Virtual/interface call through the inline cache (Java_Dynamic_Call
// materializes the IC oop before the call, per the format below).
instruct CallDynamicJavaDirect( method meth ) %{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(CALL_COST);
  format %{ "MOV_OOP    (empty),R_R8\n\t"
            "CALL,dynamic  ; NOP ==> " %}
  ins_encode( Java_Dynamic_Call( meth ), call_epilog );
  ins_pipe(call);
%}
11834 
// Call Runtime Instruction
// Call into the VM runtime; on AARCH64 the last Java PC is saved first
// (save_last_PC) so the runtime can find the caller frame.
instruct CallRuntimeDirect(method meth) %{
  match(CallRuntime);
  effect(USE meth);
  ins_cost(CALL_COST);
  format %{ "CALL,runtime" %}
#ifdef AARCH64
  ins_encode( save_last_PC, Java_To_Runtime( meth ),
              call_epilog );
#else
  ins_encode( Java_To_Runtime( meth ),
              call_epilog );
#endif
  ins_pipe(simple_call);
%}
11850 
// Call runtime without safepoint - same as CallRuntime
instruct CallLeafDirect(method meth) %{
  match(CallLeaf);
  effect(USE meth);
  ins_cost(CALL_COST);
  format %{ "CALL,runtime leaf" %}
  // TODO: need save_last_PC here? (CallRuntimeDirect does it on AARCH64)
  ins_encode( Java_To_Runtime( meth ),
              call_epilog );
  ins_pipe(simple_call);
%}
11862 
// Call runtime without safepoint - same as CallLeaf
instruct CallLeafNoFPDirect(method meth) %{
  match(CallLeafNoFP);
  effect(USE meth);
  ins_cost(CALL_COST);
  format %{ "CALL,runtime leaf nofp" %}
  // TODO: need save_last_PC here? (CallRuntimeDirect does it on AARCH64)
  ins_encode( Java_To_Runtime( meth ),
              call_epilog );
  ins_pipe(simple_call);
%}
11874 
// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(IPRegP jump_target, inline_cache_regP method_oop) %{
  match(TailCall jump_target method_oop );

  ins_cost(CALL_COST);
  format %{ "MOV    Rexception_pc, LR\n\t"
            "jump   $jump_target  \t! $method_oop holds method oop" %}
  ins_encode %{
    __ mov(Rexception_pc, LR);   // this is used only to call
                                 // StubRoutines::forward_exception_entry()
                                 // which expects PC of exception in
                                 // R5. FIXME?
    __ jump($jump_target$$Register);
  %}
  ins_pipe(tail_call);
%}
11894 
11895 
// Return Instruction
// Method return: branch to the return address in LR.
instruct Ret() %{
  match(Return);

  format %{ "ret LR" %}

  ins_encode %{
    __ ret(LR);
  %}

  ins_pipe(br);
%}
11908 
11909 
// Tail Jump; remove the return address; jump to target.
// TailCall above leaves the return address around.
// TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
// ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
// "restore" before this instruction (in Epilogue), we need to materialize it
// in %i0.
instruct tailjmpInd(IPRegP jump_target, RExceptionRegP ex_oop) %{
  match( TailJump jump_target ex_oop );
  ins_cost(CALL_COST);
  format %{ "MOV    Rexception_pc, LR\n\t"
            "jump   $jump_target \t! $ex_oop holds exc. oop" %}
  ins_encode %{
    // Exception PC is passed in Rexception_pc (see TailCalljmpInd above).
    __ mov(Rexception_pc, LR);
    __ jump($jump_target$$Register);
  %}
  ins_pipe(tail_call);
%}
11927 
// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler.  No code emitted.
instruct CreateException( RExceptionRegP ex_oop )
%{
  match(Set ex_oop (CreateEx));
  ins_cost(0);

  size(0);
  // use the following format syntax
  format %{ "! exception oop is in Rexception_obj; no code emitted" %}
  ins_encode();
  ins_pipe(empty);
%}
11942 
11943 
// Rethrow exception:
// The exception oop will come in the first argument position.
// Then JUMP (not call) to the rethrow stub code.
instruct RethrowException()
%{
  match(Rethrow);
  ins_cost(CALL_COST);

  // use the following format syntax
  format %{ "b    rethrow_stub" %}
  ins_encode %{
    // The scratch register must not alias the argument (c_rarg0) or LR,
    // which the stub expects to remain intact.
    Register scratch = R1_tmp;
    assert_different_registers(scratch, c_rarg0, LR);
    __ jump(OptoRuntime::rethrow_stub(), relocInfo::runtime_call_type, scratch);
  %}
  ins_pipe(tail_call);
%}
11961 
11962 
// Die now
// Emit a single trapping instruction (DCPS1 on AARCH64, UDF on ARM) with
// the marker value 0xdead.
instruct ShouldNotReachHere( )
%{
  match(Halt);
  ins_cost(CALL_COST);

  size(4);
  // Use the following format syntax
  format %{ "ShouldNotReachHere" %}
  ins_encode %{
#ifdef AARCH64
    __ dpcs1(0xdead);
#else
    __ udf(0xdead);
#endif
  %}
  ins_pipe(tail_call);
%}
11981 
11982 // ============================================================================
11983 // The 2nd slow-half of a subtype check.  Scan the subklass's 2ndary superklass
11984 // array for an instance of the superklass.  Set a hidden internal cache on a
11985 // hit (cache is checked with exposed code in gen_subtype_check()).  Return
11986 // not zero for a miss or zero for a hit.  The encoding ALSO sets flags.
11987 instruct partialSubtypeCheck( R0RegP index, R1RegP sub, R2RegP super, flagsRegP pcc, LRRegP lr ) %{
11988   match(Set index (PartialSubtypeCheck sub super));
11989   effect( KILL pcc, KILL lr );
11990   ins_cost(DEFAULT_COST*10);
11991   format %{ "CALL   PartialSubtypeCheck" %}
11992   ins_encode %{
11993     __ call(StubRoutines::Arm::partial_subtype_check(), relocInfo::runtime_call_type);
11994   %}
11995   ins_pipe(partial_subtype_check_pipe);
11996 %}
11997 
11998 /* instruct partialSubtypeCheck_vs_zero( flagsRegP pcc, o1RegP sub, o2RegP super, immP0 zero, o0RegP idx, o7RegP o7 ) %{ */
11999 /*   match(Set pcc (CmpP (PartialSubtypeCheck sub super) zero)); */
12000 /*   ins_pipe(partial_subtype_check_pipe); */
12001 /* %} */
12002 
12003 
12004 // ============================================================================
12005 // inlined locking and unlocking
12006 
12007 #ifdef AARCH64
12008 instruct cmpFastLock(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2, iRegP scratch, iRegP scratch3 )
12009 #else
12010 instruct cmpFastLock(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2, iRegP scratch )
12011 #endif
12012 %{
12013   match(Set pcc (FastLock object box));
12014 
12015 #ifdef AARCH64
12016   effect(TEMP scratch, TEMP scratch2, TEMP scratch3);
12017 #else
12018   effect(TEMP scratch, TEMP scratch2);
12019 #endif
12020   ins_cost(100);
12021 
12022 #ifdef AARCH64
12023   format %{ "FASTLOCK  $object, $box; KILL $scratch, $scratch2, $scratch3" %}
12024   ins_encode %{
12025     __ fast_lock($object$$Register, $box$$Register, $scratch$$Register, $scratch2$$Register, $scratch3$$Register);
12026   %}
12027 #else
12028   format %{ "FASTLOCK  $object, $box; KILL $scratch, $scratch2" %}
12029   ins_encode %{
12030     __ fast_lock($object$$Register, $box$$Register, $scratch$$Register, $scratch2$$Register);
12031   %}
12032 #endif
12033   ins_pipe(long_memory_op);
12034 %}
12035 
12036 
#ifdef AARCH64
// Inlined fast-path monitor exit; mirror of cmpFastLock above.
instruct cmpFastUnlock(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2, iRegP scratch, iRegP scratch3 ) %{
  match(Set pcc (FastUnlock object box));
  effect(TEMP scratch, TEMP scratch2, TEMP scratch3);
  ins_cost(100);

  format %{ "FASTUNLOCK  $object, $box; KILL $scratch, $scratch2, $scratch3" %}
  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $scratch$$Register, $scratch2$$Register, $scratch3$$Register);
  %}
  ins_pipe(long_memory_op);
%}
#else
instruct cmpFastUnlock(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2, iRegP scratch ) %{
  match(Set pcc (FastUnlock object box));
  effect(TEMP scratch, TEMP scratch2);
  ins_cost(100);

  format %{ "FASTUNLOCK  $object, $box; KILL $scratch, $scratch2" %}
  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $scratch$$Register, $scratch2$$Register);
  %}
  ins_pipe(long_memory_op);
%}
#endif
12062 
#ifdef AARCH64
// TODO: add version that takes immI cnt?
// Zero $cnt bytes at $base. Clears 16 bytes per iteration with STP,
// walking $ptr down from base+cnt, then stores one final 8-byte word if
// $cnt was not a multiple of 16.
instruct clear_array(iRegX cnt, iRegP base, iRegP ptr, iRegX temp, Universe dummy, flagsReg cpsr) %{
  match(Set dummy (ClearArray cnt base));
  effect(TEMP temp, TEMP ptr, KILL cpsr);
  ins_cost(300);
  format %{
      "        MOV    $temp,$cnt\n"
      "        ADD    $ptr,$base,$cnt\n"
      "        SUBS   $temp,$temp,16\t! Count down dword pair in bytes\n"
      "        B.lt   done16\n"
      "loop:   STP    ZR,ZR,[$ptr,-16]!\n"
      "        SUBS   $temp,$temp,16\t! Count down dword pair in bytes\n"
      "        B.ge   loop\t! Clearing loop\n"
      "done16: ADDS   $temp,$temp,8\t! Room for 1 more long?\n"
      "        B.lt   done\n"
      "        STR    ZR,[$base+$temp]\n"
      "done:"
  %}
  ins_encode %{
    // TODO: preload?
    __ mov($temp$$Register, $cnt$$Register);
    __ add($ptr$$Register, $base$$Register, $cnt$$Register);
    Label loop, done, done16;
    __ subs($temp$$Register, $temp$$Register, 16);
    __ b(done16, lt);
    __ bind(loop);
    // Pre-indexed STP: decrement $ptr by 16, then store two zero dwords.
    __ stp(ZR, ZR, Address($ptr$$Register, -16, pre_indexed));
    __ subs($temp$$Register, $temp$$Register, 16);
    __ b(loop, ge);
    __ bind(done16);
    __ adds($temp$$Register, $temp$$Register, 8);
    __ b(done, lt);
    // $temp should be 0 here
    __ str(ZR, Address($base$$Register, $temp$$Register));
    __ bind(done);
  %}
  ins_pipe(long_memory_op);
%}
#else
// Count and Base registers are fixed because the allocator cannot
// kill unknown registers.  The encodings are generic.
// Zero $cnt bytes at $base, 4 bytes per iteration, walking $temp down
// from $cnt to 0; the STR is conditional (ge) so the final iteration
// (temp < 0) stores nothing.
instruct clear_array(iRegX cnt, iRegP base, iRegI temp, iRegX zero, Universe dummy, flagsReg cpsr) %{
  match(Set dummy (ClearArray cnt base));
  effect(TEMP temp, TEMP zero, KILL cpsr);
  ins_cost(300);
  // Fixed: the STR.ge line was missing its trailing "\n", so the two last
  // lines ran together in PrintOptoAssembly output; also dropped the stale
  // "delay slot" note (ARM has no branch delay slots).
  format %{ "MOV    $zero,0\n"
      "        MOV    $temp,$cnt\n"
      "loop:   SUBS   $temp,$temp,4\t! Count down a dword of bytes\n"
      "        STR.ge $zero,[$base+$temp]\n"
      "        B.gt   loop\t\t! Clearing loop\n" %}
  ins_encode %{
    __ mov($zero$$Register, 0);
    __ mov($temp$$Register, $cnt$$Register);
    Label loop;   // was 'Label(loop);' — same declaration, clearer form
    __ bind(loop);
    __ subs($temp$$Register, $temp$$Register, 4);
    __ str($zero$$Register, Address($base$$Register, $temp$$Register), ge);
    __ b(loop, gt);
  %}
  ins_pipe(long_memory_op);
%}
#endif
12126 
#ifdef XXX
// NOTE: this whole section is compiled out unless XXX is defined —
// the string/array intrinsic instructs below are disabled.
// FIXME: Why R0/R1/R2/R3?
instruct string_compare(R0RegP str1, R1RegP str2, R2RegI cnt1, R3RegI cnt2, iRegI result,
                        iRegI tmp1, iRegI tmp2, flagsReg ccr) %{
  predicate(!CompactStrings);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ccr, TEMP tmp1, TEMP tmp2);
  ins_cost(300);
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   // TEMP $tmp1, $tmp2" %}
  ins_encode( enc_String_Compare(str1, str2, cnt1, cnt2, result, tmp1, tmp2) );

  ins_pipe(long_memory_op);
%}

// FIXME: Why R0/R1/R2?
instruct string_equals(R0RegP str1, R1RegP str2, R2RegI cnt, iRegI result, iRegI tmp1, iRegI tmp2,
                       flagsReg ccr) %{
  predicate(!CompactStrings);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, TEMP tmp1, TEMP tmp2, TEMP result, KILL ccr);

  ins_cost(300);
  format %{ "String Equals $str1,$str2,$cnt -> $result   // TEMP $tmp1, $tmp2" %}
  ins_encode( enc_String_Equals(str1, str2, cnt, result, tmp1, tmp2) );
  ins_pipe(long_memory_op);
%}

// FIXME: Why R0/R1?
instruct array_equals(R0RegP ary1, R1RegP ary2, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI result,
                      flagsReg ccr) %{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP result, KILL ccr);

  ins_cost(300);
  format %{ "Array Equals $ary1,$ary2 -> $result   // TEMP $tmp1,$tmp2,$tmp3" %}
  ins_encode( enc_Array_Equals(ary1, ary2, tmp1, tmp2, tmp3, result));
  ins_pipe(long_memory_op);
%}
#endif
12167 
//---------- Zeros Count Instructions ------------------------------------------

// Count leading zeros of a 32-bit int with a single CLZ.
instruct countLeadingZerosI(iRegI dst, iRegI src) %{
  match(Set dst (CountLeadingZerosI src));
  size(4);
  format %{ "CLZ_32 $dst,$src" %}
  ins_encode %{
    __ clz_32($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
12179 
#ifdef AARCH64
// 64-bit CLZ handles the whole long directly.
instruct countLeadingZerosL(iRegI dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));
  size(4);
  format %{ "CLZ $dst,$src" %}
  ins_encode %{
    __ clz($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
#else
// 32-bit ARM: CLZ the high word; if it is 32 (high word all zero, TEQ
// sets eq) add the CLZ of the low word.
instruct countLeadingZerosL(iRegI dst, iRegL src, iRegI tmp, flagsReg ccr) %{
  match(Set dst (CountLeadingZerosL src));
  effect(TEMP tmp, TEMP dst, KILL ccr);
  size(16);
  format %{ "CLZ    $dst,$src.hi\n\t"
            "TEQ    $dst,32\n\t"
            "CLZ.eq $tmp,$src.lo\n\t"
            "ADD.eq $dst, $dst, $tmp\n\t" %}
  ins_encode %{
    // ->successor() is the high half of the long register pair.
    __ clz($dst$$Register, $src$$Register->successor());
    __ teq($dst$$Register, 32);
    __ clz($tmp$$Register, $src$$Register, eq);
    __ add($dst$$Register, $dst$$Register, $tmp$$Register, eq);
  %}
  ins_pipe(ialu_reg);
%}
#endif
12208 
// Count trailing zeros of an int: bit-reverse (RBIT) then count leading zeros.
instruct countTrailingZerosI(iRegI dst, iRegI src, iRegI tmp) %{
  match(Set dst (CountTrailingZerosI src));
  effect(TEMP tmp);
  size(8);
  format %{ "RBIT_32 $tmp, $src\n\t"
            "CLZ_32  $dst,$tmp" %}
  ins_encode %{
    __ rbit_32($tmp$$Register, $src$$Register);
    __ clz_32($dst$$Register, $tmp$$Register);
  %}
  ins_pipe(ialu_reg);
%}
12221 
#ifdef AARCH64
// Count trailing zeros of a long: 64-bit RBIT then CLZ.
instruct countTrailingZerosL(iRegI dst, iRegL src, iRegL tmp) %{
  match(Set dst (CountTrailingZerosL src));
  effect(TEMP tmp);
  size(8);
  format %{ "RBIT $tmp, $src\n\t"
            "CLZ  $dst,$tmp" %}
  ins_encode %{
    __ rbit($tmp$$Register, $src$$Register);
    __ clz($dst$$Register, $tmp$$Register);
  %}
  ins_pipe(ialu_reg);
%}
#else
// 32-bit ARM: trailing-zero count of the low word via RBIT+CLZ; if it is 32
// (low word was all zero, detected via TEQ setting EQ), conditionally add
// the trailing-zero count of the high word.
instruct countTrailingZerosL(iRegI dst, iRegL src, iRegI tmp, flagsReg ccr) %{
  match(Set dst (CountTrailingZerosL src));
  effect(TEMP tmp, TEMP dst, KILL ccr);
  size(24);
  format %{ "RBIT   $tmp,$src.lo\n\t"
            "CLZ    $dst,$tmp\n\t"
            "TEQ    $dst,32\n\t"
            "RBIT   $tmp,$src.hi\n\t"
            "CLZ.eq $tmp,$tmp\n\t"
            "ADD.eq $dst,$dst,$tmp\n\t" %}
  ins_encode %{
    __ rbit($tmp$$Register, $src$$Register);
    __ clz($dst$$Register, $tmp$$Register);
    __ teq($dst$$Register, 32);
    // successor() of the low register of a long pair is its high register.
    __ rbit($tmp$$Register, $src$$Register->successor());
    __ clz($tmp$$Register, $tmp$$Register, eq);
    __ add($dst$$Register, $dst$$Register, $tmp$$Register, eq);
  %}
  ins_pipe(ialu_reg);
%}
#endif
12257 
12258 
12259 //---------- Population Count Instructions -------------------------------------
12260 
#ifdef AARCH64
// Population count of an int via the SIMD unit: move the value into a
// vector register, count bits per byte (VCNT), horizontally sum the bytes
// (ADDV), and move the result back to a core register.
instruct popCountI(iRegI dst, iRegI src, regD_low tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  size(20);

  format %{ "MOV_W      $dst,$src\n\t"
            "FMOV_dx    $tmp,$dst\n\t"
            "VCNT       $tmp.8B,$tmp.8B\n\t"
            "ADDV       $tmp.B,$tmp.8B\n\t"
            "FMRS       $dst,$tmp" %}

  ins_encode %{
    __ mov_w($dst$$Register, $src$$Register);
    __ fmov_dx($tmp$$FloatRegister, $dst$$Register);
    int quad = 0;
    int cnt_size = 0; // VELEM_SIZE_8
    __ vcnt($tmp$$FloatRegister, $tmp$$FloatRegister, quad, cnt_size);
    int add_size = 0; // VELEM_SIZE_8
    __ addv($tmp$$FloatRegister, $tmp$$FloatRegister, quad, add_size);
    __ fmrs($dst$$Register, $tmp$$FloatRegister);
  %}
  ins_pipe(ialu_reg); // FIXME
%}
#else
// 32-bit ARM: per-byte bit count (VCNT.8), then two widening pairwise adds
// (VPADDL.U8 / VPADDL.U16) to accumulate the four byte counts into one value.
instruct popCountI(iRegI dst, iRegI src, regD_low tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);

  format %{ "FMSR       $tmp,$src\n\t"
            "VCNT.8     $tmp,$tmp\n\t"
            "VPADDL.U8  $tmp,$tmp\n\t"
            "VPADDL.U16 $tmp,$tmp\n\t"
            "FMRS       $dst,$tmp" %}
  size(20);

  ins_encode %{
    __ fmsr($tmp$$FloatRegister, $src$$Register);
    __ vcnt($tmp$$FloatRegister, $tmp$$FloatRegister);
    __ vpaddl($tmp$$FloatRegister, $tmp$$FloatRegister, 8, 0);
    __ vpaddl($tmp$$FloatRegister, $tmp$$FloatRegister, 16, 0);
    __ fmrs($dst$$Register, $tmp$$FloatRegister);
  %}
  ins_pipe(ialu_reg); // FIXME
%}
#endif
12309 
#ifdef AARCH64
// Population count of a long (result is an int): same VCNT + ADDV scheme
// as popCountI, but the 64-bit source is moved in with FMOV_dx directly.
instruct popCountL(iRegI dst, iRegL src, regD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  size(16);

  format %{ "FMOV_dx    $tmp,$src\n\t"
            "VCNT       $tmp.8B,$tmp.8B\n\t"
            "ADDV       $tmp.B,$tmp.8B\n\t"
            "FMOV_ws    $dst,$tmp" %}

  ins_encode %{
    __ fmov_dx($tmp$$FloatRegister, $src$$Register);
    int quad = 0;
    int cnt_size = 0;
    __ vcnt($tmp$$FloatRegister, $tmp$$FloatRegister, quad, cnt_size);
    int add_size = 0;
    __ addv($tmp$$FloatRegister, $tmp$$FloatRegister, quad, add_size);
    __ fmov_ws($dst$$Register, $tmp$$FloatRegister);
  %}
  ins_pipe(ialu_reg); // FIXME
%}
#else
// Note: Long.bitCount(long) returns an int.
// 32-bit ARM: both halves of the long go into one D register (FMDRR), then
// per-byte counts are reduced with three widening pairwise adds (8->16->32->64).
instruct popCountL(iRegI dst, iRegL src, regD_low tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);

  format %{ "FMDRR       $tmp,$src.lo,$src.hi\n\t"
            "VCNT.8      $tmp,$tmp\n\t"
            "VPADDL.U8   $tmp,$tmp\n\t"
            "VPADDL.U16  $tmp,$tmp\n\t"
            "VPADDL.U32  $tmp,$tmp\n\t"
            "FMRS        $dst,$tmp" %}

  size(32);

  ins_encode %{
    __ fmdrr($tmp$$FloatRegister, $src$$Register, $src$$Register->successor());
    __ vcnt($tmp$$FloatRegister, $tmp$$FloatRegister);
    __ vpaddl($tmp$$FloatRegister, $tmp$$FloatRegister, 8, 0);
    __ vpaddl($tmp$$FloatRegister, $tmp$$FloatRegister, 16, 0);
    __ vpaddl($tmp$$FloatRegister, $tmp$$FloatRegister, 32, 0);
    __ fmrs($dst$$Register, $tmp$$FloatRegister);
  %}
  ins_pipe(ialu_reg);
%}
#endif
12360 
12361 
12362 // ============================================================================
12363 //------------Bytes reverse--------------------------------------------------
12364 
// Reverse the byte order of a 32-bit int with a single REV instruction.
instruct bytes_reverse_int(iRegI dst, iRegI src) %{
  match(Set dst (ReverseBytesI src));

  size(4);
  format %{ "REV32 $dst,$src" %}
  ins_encode %{
#ifdef AARCH64
    __ rev_w($dst$$Register, $src$$Register);
    // high 32 bits zeroed, not sign extended
#else
    __ rev($dst$$Register, $src$$Register);
#endif
  %}
  ins_pipe( iload_mem ); // FIXME
%}
12380 
// Reverse the byte order of a long. On 32-bit ARM this byte-reverses each
// word while also swapping the two words (dst.lo = rev(src.hi),
// dst.hi = rev(src.lo)); TEMP dst keeps dst disjoint from src so the first
// write cannot clobber a source word that is still needed.
instruct bytes_reverse_long(iRegL dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));
#ifdef AARCH64
//size(4);
  format %{ "REV $dst,$src"  %}
  ins_encode %{
    __ rev($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg_reg); // FIXME
#else
  effect(TEMP dst);
  size(8);
  format %{ "REV $dst.lo,$src.lo\n\t"
            "REV $dst.hi,$src.hi" %}
  ins_encode %{
    __ rev($dst$$Register, $src$$Register->successor());
    __ rev($dst$$Register->successor(), $src$$Register);
  %}
  ins_pipe( iload_mem ); // FIXME
#endif
%}
12402 
// Reverse the bytes of an unsigned short (char): REV16, no sign extension.
instruct bytes_reverse_unsigned_short(iRegI dst, iRegI src) %{
  match(Set dst (ReverseBytesUS src));
#ifdef AARCH64
  size(4);
  format %{ "REV16_W $dst,$src" %}
  ins_encode %{
    __ rev16_w($dst$$Register, $src$$Register);
    // high 32 bits zeroed
  %}
#else
  size(4);
  format %{ "REV16 $dst,$src" %}
  ins_encode %{
    __ rev16($dst$$Register, $src$$Register);
  %}
#endif
  ins_pipe( iload_mem ); // FIXME
%}
12421 
// Reverse the bytes of a signed short. ARM32 has REVSH (reverse + sign
// extend); on AArch64 the sign extension is a separate instruction.
instruct bytes_reverse_short(iRegI dst, iRegI src) %{
  match(Set dst (ReverseBytesS src));
#ifdef AARCH64
  size(8);
  format %{ "REV16_W $dst,$src\n\t"
            "SIGN_EXT16 $dst" %}
  ins_encode %{
    __ rev16_w($dst$$Register, $src$$Register);
    __ sign_extend($dst$$Register, $dst$$Register, 16);
  %}
#else
  size(4);
  format %{ "REVSH $dst,$src" %}
  ins_encode %{
    __ revsh($dst$$Register, $src$$Register);
  %}
#endif
  ins_pipe( iload_mem ); // FIXME
%}
12441 
12442 
12443 // ====================VECTOR INSTRUCTIONS=====================================
12444 
// Load Aligned Packed values into a Double Register
// 8-byte vector load, implemented as a double-precision FP load (FLDD).
instruct loadV8(vecD dst, memoryD mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "FLDD   $mem,$dst\t! load vector (8 bytes)" %}
  ins_encode %{
    __ ldr_double($dst$$FloatRegister, $mem$$Address);
  %}
  ins_pipe(floadD_mem);
%}
12457 
// Load Aligned Packed values into a Double Register Pair
// 16-byte vector load via VLD1 (128-bit NEON load).
instruct loadV16(vecX dst, memoryvld mem) %{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "VLD1   $mem,$dst.Q\t! load vector (16 bytes)" %}
  ins_encode %{
    __ vld1($dst$$FloatRegister, $mem$$Address, MacroAssembler::VELEM_SIZE_16, 128);
  %}
  ins_pipe(floadD_mem); // FIXME
%}
12470 
// Store Vector in Double register to memory
// 8-byte vector store, implemented as a double-precision FP store (FSTD).
instruct storeV8(memoryD mem, vecD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "FSTD   $src,$mem\t! store vector (8 bytes)" %}
  ins_encode %{
    __ str_double($src$$FloatRegister, $mem$$Address);
  %}
  ins_pipe(fstoreD_mem_reg);
%}
12483 
// Store Vector in Double Register Pair to memory
// 16-byte vector store via VST1 (128-bit NEON store).
instruct storeV16(memoryvld mem, vecX src) %{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "VST1   $src,$mem\t! store vector (16 bytes)" %}
  ins_encode %{
    __ vst1($src$$FloatRegister, $mem$$Address, MacroAssembler::VELEM_SIZE_16, 128);
  %}
  ins_pipe(fstoreD_mem_reg); // FIXME
%}
12496 
#ifndef AARCH64
// Replicate scalar to packed byte values in Double register
// Non-SIMD fallback: smear the low byte across a core register
// (shift to the top, then OR in shifted copies), then duplicate
// that 32-bit pattern into both halves of the D register via FMDRR.
instruct Repl8B_reg(vecD dst, iRegI src, iRegI tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(DEFAULT_COST*4);
  effect(TEMP tmp);
  size(16);

  // FIXME: could use PKH instruction instead?
  format %{ "LSL      $tmp, $src, 24 \n\t"
            "OR       $tmp, $tmp, ($tmp >> 8) \n\t"
            "OR       $tmp, $tmp, ($tmp >> 16) \n\t"
            "FMDRR    $dst,$tmp,$tmp\t" %}
  ins_encode %{
    __ mov($tmp$$Register, AsmOperand($src$$Register, lsl, 24));
    __ orr($tmp$$Register, $tmp$$Register, AsmOperand($tmp$$Register, lsr, 8));
    __ orr($tmp$$Register, $tmp$$Register, AsmOperand($tmp$$Register, lsr, 16));
    __ fmdrr($dst$$FloatRegister, $tmp$$Register, $tmp$$Register);
  %}
  ins_pipe(ialu_reg); // FIXME
%}
#endif /* !AARCH64 */
12520 
// Replicate scalar to packed byte values in Double register
// SIMD version: single VDUP.8 broadcasts the byte into all 8 lanes.
instruct Repl8B_reg_simd(vecD dst, iRegI src) %{
  predicate(n->as_Vector()->length_in_bytes() == 8 && VM_Version::has_simd());
  match(Set dst (ReplicateB src));
  size(4);

  format %{ "VDUP.8 $dst,$src\t" %}
  ins_encode %{
    bool quad = false;
    __ vdupI($dst$$FloatRegister, $src$$Register,
             MacroAssembler::VELEM_SIZE_8, quad);
  %}
  ins_pipe(ialu_reg); // FIXME
%}
12535 
// Replicate scalar to packed byte values in Double register pair
// Quad (128-bit) VDUP.8 into all 16 lanes. No explicit has_simd() check
// here — presumably a vecX operand is only selected when SIMD is present;
// TODO confirm against Matcher::vector_width_in_bytes.
instruct Repl16B_reg(vecX dst, iRegI src) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (ReplicateB src));
  size(4);

  format %{ "VDUP.8 $dst.Q,$src\t" %}
  ins_encode %{
    bool quad = true;
    __ vdupI($dst$$FloatRegister, $src$$Register,
             MacroAssembler::VELEM_SIZE_8, quad);
  %}
  ins_pipe(ialu_reg); // FIXME
%}
12550 
#ifndef AARCH64
// Replicate scalar constant to packed byte values in Double register
// Non-SIMD fallback: LdReplImmI (an encoding class not visible in this
// chunk) materializes 4 copies of the 1-byte constant in $tmp and then
// fills both halves of the D register.
instruct Repl8B_immI(vecD dst, immI src, iRegI tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(DEFAULT_COST*2);
  effect(TEMP tmp);
  size(12);

  format %{ "MOV      $tmp, Repl4($src))\n\t"
            "FMDRR    $dst,$tmp,$tmp\t" %}
  ins_encode( LdReplImmI(src, dst, tmp, (4), (1)) );
  ins_pipe(loadConFD); // FIXME
%}
#endif /* !AARCH64 */
12566 
// Replicate scalar constant to packed byte values in Double register
// SIMD version: VMOV with an immediate, limited to unsigned 8-bit constants.
// TODO: support negative constants with MVNI?
instruct Repl8B_immU8(vecD dst, immU8 src) %{
  predicate(n->as_Vector()->length_in_bytes() == 8 && VM_Version::has_simd());
  match(Set dst (ReplicateB src));
  size(4);

  format %{ "VMOV.U8  $dst,$src" %}
  ins_encode %{
    bool quad = false;
    __ vmovI($dst$$FloatRegister, $src$$constant,
             MacroAssembler::VELEM_SIZE_8, quad);
  %}
  ins_pipe(loadConFD); // FIXME
%}
12582 
// Replicate scalar constant to packed byte values in Double register pair
// Quad (128-bit) immediate VMOV into all 16 byte lanes.
instruct Repl16B_immU8(vecX dst, immU8 src) %{
  predicate(n->as_Vector()->length_in_bytes() == 16 && VM_Version::has_simd());
  match(Set dst (ReplicateB src));
  size(4);

  format %{ "VMOV.U8  $dst.Q,$src" %}
  ins_encode %{
    bool quad = true;
    __ vmovI($dst$$FloatRegister, $src$$constant,
             MacroAssembler::VELEM_SIZE_8, quad);
  %}
  ins_pipe(loadConFD); // FIXME
%}
12597 
#ifndef AARCH64
// Replicate scalar to packed short/char values into Double register
// Non-SIMD fallback: smear the low halfword across a core register
// (shift up 16, OR in the shifted copy), then FMDRR into both halves.
instruct Repl4S_reg(vecD dst, iRegI src, iRegI tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(DEFAULT_COST*3);
  effect(TEMP tmp);
  size(12);

  // FIXME: could use PKH instruction instead?
  format %{ "LSL      $tmp, $src, 16 \n\t"
            "OR       $tmp, $tmp, ($tmp >> 16) \n\t"
            "FMDRR    $dst,$tmp,$tmp\t" %}
  ins_encode %{
    __ mov($tmp$$Register, AsmOperand($src$$Register, lsl, 16));
    __ orr($tmp$$Register, $tmp$$Register, AsmOperand($tmp$$Register, lsr, 16));
    __ fmdrr($dst$$FloatRegister, $tmp$$Register, $tmp$$Register);
  %}
  ins_pipe(ialu_reg); // FIXME
%}
#endif /* !AARCH64 */
12619 
// Replicate scalar to packed short/char values in Double register
// (comment fixed: this is ReplicateS, not bytes). Single VDUP.16.
instruct Repl4S_reg_simd(vecD dst, iRegI src) %{
  predicate(n->as_Vector()->length_in_bytes() == 8 && VM_Version::has_simd());
  match(Set dst (ReplicateS src));
  size(4);

  format %{ "VDUP.16 $dst,$src\t" %}
  ins_encode %{
    bool quad = false;
    __ vdupI($dst$$FloatRegister, $src$$Register,
             MacroAssembler::VELEM_SIZE_16, quad);
  %}
  ins_pipe(ialu_reg); // FIXME
%}
12634 
// Replicate scalar to packed short/char values in Double register pair
// (comment fixed: this is ReplicateS, not bytes). Quad VDUP.16, 8 lanes.
instruct Repl8S_reg(vecX dst, iRegI src) %{
  predicate(n->as_Vector()->length_in_bytes() == 16 && VM_Version::has_simd());
  match(Set dst (ReplicateS src));
  size(4);

  format %{ "VDUP.16 $dst.Q,$src\t" %}
  ins_encode %{
    bool quad = true;
    __ vdupI($dst$$FloatRegister, $src$$Register,
             MacroAssembler::VELEM_SIZE_16, quad);
  %}
  ins_pipe(ialu_reg); // FIXME
%}
12649 
12650 
#ifndef AARCH64
// Replicate scalar constant to packed short/char values in Double register
// Non-SIMD fallback: LdReplImmI (encoding class not visible in this chunk)
// materializes 2 copies of the 2-byte constant in $tmp, then FMDRR.
instruct Repl4S_immI(vecD dst, immI src, iRegP tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  effect(TEMP tmp);
  size(12);
  ins_cost(DEFAULT_COST*4); // FIXME

  format %{ "MOV      $tmp, Repl2($src))\n\t"
            "FMDRR    $dst,$tmp,$tmp\t" %}
  ins_encode( LdReplImmI(src, dst, tmp, (2), (2)) );
  ins_pipe(loadConFD); // FIXME
%}
#endif /* !AARCH64 */
12666 
// Replicate scalar constant to packed short/char values in Double register
// (comment fixed: this is ReplicateS, not bytes). Immediate VMOV.U16.
instruct Repl4S_immU8(vecD dst, immU8 src) %{
  predicate(n->as_Vector()->length_in_bytes() == 8 && VM_Version::has_simd());
  match(Set dst (ReplicateS src));
  size(4);

  format %{ "VMOV.U16  $dst,$src" %}
  ins_encode %{
    bool quad = false;
    __ vmovI($dst$$FloatRegister, $src$$constant,
             MacroAssembler::VELEM_SIZE_16, quad);
  %}
  ins_pipe(loadConFD); // FIXME
%}
12681 
// Replicate scalar constant to packed short/char values in Double register
// pair (comment fixed: this is ReplicateS, not bytes). Quad VMOV.U16.
instruct Repl8S_immU8(vecX dst, immU8 src) %{
  predicate(n->as_Vector()->length_in_bytes() == 16 && VM_Version::has_simd());
  match(Set dst (ReplicateS src));
  size(4);

  format %{ "VMOV.U16  $dst.Q,$src" %}
  ins_encode %{
    bool quad = true;
    __ vmovI($dst$$FloatRegister, $src$$constant,
             MacroAssembler::VELEM_SIZE_16, quad);
  %}
  ins_pipe(loadConFD); // FIXME
%}
12696 
#ifndef AARCH64
// Replicate scalar to packed int values in Double register
// Non-SIMD fallback: FMDRR puts the same 32-bit value in both halves.
instruct Repl2I_reg(vecD dst, iRegI src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  size(4);

  format %{ "FMDRR    $dst,$src,$src\t" %}
  ins_encode %{
    __ fmdrr($dst$$FloatRegister, $src$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg); // FIXME
%}

// Replicate scalar to packed int values in Double register pair
// Two FMDRRs: a vecX occupies two consecutive D-register pairs, hence the
// successor()->successor() step to reach the second D register.
instruct Repl4I_reg(vecX dst, iRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(DEFAULT_COST*2);
  size(8);

  format %{ "FMDRR    $dst.lo,$src,$src\n\t"
            "FMDRR    $dst.hi,$src,$src" %}

  ins_encode %{
    __ fmdrr($dst$$FloatRegister, $src$$Register, $src$$Register);
    __ fmdrr($dst$$FloatRegister->successor()->successor(),
             $src$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg); // FIXME
%}
#endif /* !AARCH64 */
12729 
// Replicate scalar to packed int values in Double register
// SIMD version: single VDUP.32 into both lanes.
instruct Repl2I_reg_simd(vecD dst, iRegI src) %{
  predicate(n->as_Vector()->length_in_bytes() == 8 && VM_Version::has_simd());
  match(Set dst (ReplicateI src));
  size(4);

  format %{ "VDUP.32 $dst.D,$src\t" %}
  ins_encode %{
    bool quad = false;
    __ vdupI($dst$$FloatRegister, $src$$Register,
             MacroAssembler::VELEM_SIZE_32, quad);
  %}
  ins_pipe(ialu_reg); // FIXME
%}
12744 
// Replicate scalar to packed int values in Double register pair
// Quad (128-bit) VDUP.32 into all four lanes.
instruct Repl4I_reg_simd(vecX dst, iRegI src) %{
  predicate(n->as_Vector()->length_in_bytes() == 16 && VM_Version::has_simd());
  match(Set dst (ReplicateI src));
  size(4);

  format %{ "VDUP.32 $dst.Q,$src\t" %}
  ins_encode %{
    bool quad = true;
    __ vdupI($dst$$FloatRegister, $src$$Register,
             MacroAssembler::VELEM_SIZE_32, quad);
  %}
  ins_pipe(ialu_reg); // FIXME
%}
12759 
12760 
#ifndef AARCH64
// Replicate scalar int constant to packed int values in Double register
// (old comment said "zero constant", but any immI is accepted here).
// LdReplImmI (encoding class not visible in this chunk) materializes the
// 4-byte constant in $tmp, then FMDRR fills both halves.
instruct Repl2I_immI(vecD dst, immI src, iRegI tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  effect(TEMP tmp);
  size(12);
  ins_cost(DEFAULT_COST*4); // FIXME

  format %{ "MOV      $tmp, Repl1($src))\n\t"
            "FMDRR    $dst,$tmp,$tmp\t" %}
  ins_encode( LdReplImmI(src, dst, tmp, (1), (4)) );
  ins_pipe(loadConFD); // FIXME
%}
#endif /* !AARCH64 */
12776 
// Replicate scalar constant to packed int values in Double register
// (comment fixed: this is ReplicateI, not bytes). Immediate VMOV.I32.
instruct Repl2I_immU8(vecD dst, immU8 src) %{
  predicate(n->as_Vector()->length_in_bytes() == 8 && VM_Version::has_simd());
  match(Set dst (ReplicateI src));
  size(4);

  format %{ "VMOV.I32  $dst.D,$src" %}
  ins_encode %{
    bool quad = false;
    __ vmovI($dst$$FloatRegister, $src$$constant,
             MacroAssembler::VELEM_SIZE_32, quad);
  %}
  ins_pipe(loadConFD); // FIXME
%}
12791 
// Replicate scalar constant to packed int values in Double register pair
// (comment fixed: this is ReplicateI, not bytes). Quad VMOV.I32.
instruct Repl4I_immU8(vecX dst, immU8 src) %{
  predicate(n->as_Vector()->length_in_bytes() == 16 && VM_Version::has_simd());
  match(Set dst (ReplicateI src));
  size(4);

  format %{ "VMOV.I32  $dst.Q,$src" %}
  ins_encode %{
    bool quad = true;
    __ vmovI($dst$$FloatRegister, $src$$constant,
             MacroAssembler::VELEM_SIZE_32, quad);
  %}
  ins_pipe(loadConFD); // FIXME
%}
12806 
#ifdef AARCH64
// Replicate scalar to packed long values in Double register pair
// (comment fixed: this is ReplicateL, not bytes). Single quad VDUP.2D.
instruct Repl2L_reg(vecX dst, iRegL src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  size(4*1);
  ins_cost(DEFAULT_COST*1); // FIXME

  format %{ "VDUP.2D $dst.Q,$src\t" %}
  ins_encode %{
    bool quad = true;
    __ vdupI($dst$$FloatRegister, $src$$Register,
             MacroAssembler::VELEM_SIZE_64, quad);
  %}
  ins_pipe(ialu_reg); // FIXME
%}
#else /* !AARCH64 */
// Replicate scalar to packed long values in Double register pair
// (comment fixed: this is ReplicateL, not bytes). The 64-bit value
// (src.lo:src.hi) is written into each of the two D registers of the vecX.
instruct Repl2L_reg(vecX dst, iRegL src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  size(8);
  ins_cost(DEFAULT_COST*2); // FIXME

  format %{ "FMDRR $dst.D,$src.lo,$src.hi\t\n"
            "FMDRR $dst.D.next,$src.lo,$src.hi" %}
  ins_encode %{
    __ fmdrr($dst$$FloatRegister, $src$$Register, $src$$Register->successor());
    __ fmdrr($dst$$FloatRegister->successor()->successor(),
             $src$$Register, $src$$Register->successor());
  %}
  ins_pipe(ialu_reg); // FIXME
%}


// Replicate scalar to packed float values in Double register
// Int-register variant: the float bit pattern is already in a core
// register, so one FMDRR duplicates it into both halves of the D register.
instruct Repl2F_regI(vecD dst, iRegI src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  size(4);

  format %{ "FMDRR    $dst.D,$src,$src\t" %}
  ins_encode %{
    __ fmdrr($dst$$FloatRegister, $src$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg); // FIXME
%}

// Replicate scalar to packed float values in Double register
// VFP variant: expands into a float->int register move followed by
// Repl2F_regI (both instructs are defined elsewhere/above in this file).
instruct Repl2F_reg_vfp(vecD dst, regF src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  size(4*2);
  ins_cost(DEFAULT_COST*2); // FIXME

  expand %{
    iRegI tmp;
    MoveF2I_reg_reg(tmp, src);
    Repl2F_regI(dst,tmp);
  %}
%}
#endif /* !AARCH64 */
12869 
// Replicate scalar to packed float values in Double register
// SIMD version: single VDUP.32 from a float register lane.
instruct Repl2F_reg_simd(vecD dst, regF src) %{
  predicate(n->as_Vector()->length_in_bytes() == 8 && VM_Version::has_simd());
  match(Set dst (ReplicateF src));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME

  format %{ "VDUP.32  $dst.D,$src.D\t" %}
  ins_encode %{
    bool quad = false;
    __ vdupF($dst$$FloatRegister, $src$$FloatRegister, quad);
  %}
  ins_pipe(ialu_reg); // FIXME
%}
12884 
#ifndef AARCH64
// Replicate scalar to packed float values in Double register pair
// Non-SIMD fallback: move the float bits to a core register (FMRS), then
// FMDRR the pattern into both D registers of the vecX.
instruct Repl4F_reg(vecX dst, regF src, iRegI tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  effect(TEMP tmp);
  size(4*3);
  ins_cost(DEFAULT_COST*3); // FIXME

  format %{ "FMRS     $tmp,$src\n\t"
            "FMDRR    $dst.D,$tmp,$tmp\n\t"
            "FMDRR    $dst.D.next,$tmp,$tmp\t" %}
  ins_encode %{
    __ fmrs($tmp$$Register, $src$$FloatRegister);
    __ fmdrr($dst$$FloatRegister, $tmp$$Register, $tmp$$Register);
    __ fmdrr($dst$$FloatRegister->successor()->successor(),
             $tmp$$Register, $tmp$$Register);
  %}
  ins_pipe(ialu_reg); // FIXME
%}
#endif /* !AARCH64 */
12906 
// Replicate scalar to packed float values in Double register pair
// SIMD version: quad (128-bit) VDUP.32 into all four lanes.
instruct Repl4F_reg_simd(vecX dst, regF src) %{
  predicate(n->as_Vector()->length_in_bytes() == 16 && VM_Version::has_simd());
  match(Set dst (ReplicateF src));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME

  format %{ "VDUP.32  $dst.Q,$src.D\t" %}
  ins_encode %{
    bool quad = true;
    __ vdupF($dst$$FloatRegister, $src$$FloatRegister, quad);
  %}
  ins_pipe(ialu_reg); // FIXME
%}
12921 
#ifndef AARCH64
// Replicate scalar float constant to packed float values in Double register
// (old comment said "zero constant", but any immF is accepted here).
// LdReplImmF is an encoding class not visible in this chunk.
instruct Repl2F_immI(vecD dst, immF src, iRegI tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  effect(TEMP tmp);
  size(12);
  ins_cost(DEFAULT_COST*4); // FIXME

  format %{ "MOV      $tmp, Repl1($src))\n\t"
            "FMDRR    $dst,$tmp,$tmp\t" %}
  ins_encode( LdReplImmF(src, dst, tmp) );
  ins_pipe(loadConFD); // FIXME
%}
#endif /* !AARCH64 */
12937 
// Replicate scalar to packed double float values in Double register pair
// AArch64: one quad VDUP. 32-bit ARM: two FCPYD copies, the second into
// the next D-register pair of the vecX (successor()->successor()).
instruct Repl2D_reg(vecX dst, regD src) %{
#ifdef AARCH64
  predicate(n->as_Vector()->length() == 2 && VM_Version::has_simd());
  match(Set dst (ReplicateD src));
  size(4*1);
  ins_cost(DEFAULT_COST*1); // FIXME

  format %{ "VDUP     $dst.2D,$src\t" %}
  ins_encode %{
    bool quad = true;
    __ vdupD($dst$$FloatRegister, $src$$FloatRegister, quad);
  %}
#else
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  size(4*2);
  ins_cost(DEFAULT_COST*2); // FIXME

  format %{ "FCPYD    $dst.D.a,$src\n\t"
            "FCPYD    $dst.D.b,$src\t" %}
  ins_encode %{
    FloatRegister dsta = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;
    __ fcpyd(dsta, src);
    FloatRegister dstb = dsta->successor()->successor();
    __ fcpyd(dstb, src);
  %}
#endif
  ins_pipe(ialu_reg); // FIXME
%}
12969 
12970 // ====================VECTOR ARITHMETIC=======================================
12971 
12972 // --------------------------------- ADD --------------------------------------
12973 
// Bytes vector add
// 8-lane byte add on a 64-bit D register (VADD.I8).
instruct vadd8B_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  format %{ "VADD.I8 $dst,$src1,$src2\t! add packed8B" %}
  size(4);
  ins_encode %{
    bool quad = false;
    __ vaddI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_8, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
12987 
// 16-lane byte add on a 128-bit Q register (quad VADD.I8).
instruct vadd16B_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  size(4);
  format %{ "VADD.I8 $dst.Q,$src1.Q,$src2.Q\t! add packed16B" %}
  ins_encode %{
    bool quad = true;
    __ vaddI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_8, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13000 
// Shorts/Chars vector add
// 4-lane halfword add on a 64-bit D register (VADD.I16).
instruct vadd4S_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  size(4);
  format %{ "VADD.I16 $dst,$src1,$src2\t! add packed4S" %}
  ins_encode %{
    bool quad = false;
    __ vaddI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_16, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13014 
// 8-lane halfword add on a 128-bit Q register (quad VADD.I16).
instruct vadd8S_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  size(4);
  format %{ "VADD.I16 $dst.Q,$src1.Q,$src2.Q\t! add packed8S" %}
  ins_encode %{
    bool quad = true;
    __ vaddI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_16, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13027 
// Integers vector add
// 2-lane word add on a 64-bit D register (VADD.I32).
instruct vadd2I_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  size(4);
  format %{ "VADD.I32 $dst.D,$src1.D,$src2.D\t! add packed2I" %}
  ins_encode %{
    bool quad = false;
    __ vaddI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_32, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13041 
// 4-lane word add on a 128-bit Q register (quad VADD.I32).
instruct vadd4I_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  size(4);
  format %{ "VADD.I32 $dst.Q,$src1.Q,$src2.Q\t! add packed4I" %}
  ins_encode %{
    bool quad = true;
    __ vaddI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_32, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13054 
// Longs vector add
// 2-lane doubleword add on a 128-bit Q register (quad VADD.I64).
instruct vadd2L_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  size(4);
  format %{ "VADD.I64 $dst.Q,$src1.Q,$src2.Q\t! add packed2L" %}
  ins_encode %{
    bool quad = true;
    __ vaddI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_64, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13068 
13069 // Floats vector add
// Add packed 2 floats via NEON, only when the SIMD unit is IEEE-compliant
// (non-compliant hardware falls through to the VFP rule below).
instruct vadd2F_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2 && VM_Version::simd_math_is_compliant());
  match(Set dst (AddVF src1 src2));
  size(4);
  format %{ "VADD.F32 $dst,$src1,$src2\t! add packed2F" %}
  ins_encode %{
    bool quad = false;
    __ vaddF($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VFA_SIZE_F32, quad);
  %}
  ins_pipe( faddD_reg_reg ); // FIXME
%}
13082 
13083 #ifndef AARCH64
// VFP scalar fallback for 2-float add when NEON math is not IEEE-compliant:
// two FADDS on a D register's successive single-precision halves.
instruct vadd2F_reg_vfp(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2 && !VM_Version::simd_math_is_compliant());
  match(Set dst (AddVF src1 src2));
  ins_cost(DEFAULT_COST*2); // FIXME

  size(4*2);
  format %{ "FADDS  $dst.a,$src1.a,$src2.a\n\t"
            "FADDS  $dst.b,$src1.b,$src2.b" %}
  ins_encode %{
    __ add_float($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
    __ add_float($dst$$FloatRegister->successor(),
             $src1$$FloatRegister->successor(),
             $src2$$FloatRegister->successor());
  %}

  ins_pipe(faddF_reg_reg); // FIXME
%}
13101 #endif
13102 
// Add packed 4 floats via NEON on a Q register; requires IEEE-compliant SIMD.
instruct vadd4F_reg_simd(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 4 && VM_Version::simd_math_is_compliant());
  match(Set dst (AddVF src1 src2));
  size(4);
  format %{ "VADD.F32 $dst.Q,$src1.Q,$src2.Q\t! add packed4F" %}
  ins_encode %{
    bool quad = true;
    __ vaddF($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VFA_SIZE_F32, quad);
  %}
  ins_pipe( faddD_reg_reg ); // FIXME
%}
13115 
13116 #ifdef AARCH64
// Add packed 2 doubles via SIMD (AArch64-only branch of this #ifdef).
instruct vadd2D_reg_simd(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 2 && VM_Version::simd_math_is_compliant());
  match(Set dst (AddVD src1 src2));
  size(4);
  format %{ "VADD.F64 $dst.Q,$src1.Q,$src2.Q\t! add packed2D" %}
  ins_encode %{
    bool quad = true;
    __ vaddF($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VFA_SIZE_F64, quad);
  %}
  ins_pipe( faddD_reg_reg ); // FIXME
%}
13129 #else
// VFP scalar fallback for 4-float add: four FADDS over the Q register's
// four successive single-precision lanes (a..d via FloatRegister::successor).
instruct vadd4F_reg_vfp(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 4 && !VM_Version::simd_math_is_compliant());
  match(Set dst (AddVF src1 src2));
  size(4*4);
  ins_cost(DEFAULT_COST*4); // FIXME

  format %{ "FADDS  $dst.a,$src1.a,$src2.a\n\t"
            "FADDS  $dst.b,$src1.b,$src2.b\n\t"
            "FADDS  $dst.c,$src1.c,$src2.c\n\t"
            "FADDS  $dst.d,$src1.d,$src2.d" %}

  ins_encode %{
    FloatRegister dsta = $dst$$FloatRegister;
    FloatRegister src1a = $src1$$FloatRegister;
    FloatRegister src2a = $src2$$FloatRegister;
    __ add_float(dsta, src1a, src2a);
    FloatRegister dstb = dsta->successor();
    FloatRegister src1b = src1a->successor();
    FloatRegister src2b = src2a->successor();
    __ add_float(dstb, src1b, src2b);
    FloatRegister dstc = dstb->successor();
    FloatRegister src1c = src1b->successor();
    FloatRegister src2c = src2b->successor();
    __ add_float(dstc, src1c, src2c);
    FloatRegister dstd = dstc->successor();
    FloatRegister src1d = src1c->successor();
    FloatRegister src2d = src2c->successor();
    __ add_float(dstd, src1d, src2d);
  %}

  ins_pipe(faddF_reg_reg); // FIXME
%}
13162 
// VFP fallback for 2-double add: two FADDD, stepping two S-register
// successors per lane to reach the next double. No simd_math predicate is
// needed here — on this (#else, 32-bit) branch there is no SIMD AddVD rule.
instruct vadd2D_reg_vfp(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  size(4*2);
  ins_cost(DEFAULT_COST*2); // FIXME

  format %{ "FADDD  $dst.a,$src1.a,$src2.a\n\t"
            "FADDD  $dst.b,$src1.b,$src2.b" %}

  ins_encode %{
    FloatRegister dsta = $dst$$FloatRegister;
    FloatRegister src1a = $src1$$FloatRegister;
    FloatRegister src2a = $src2$$FloatRegister;
    __ add_double(dsta, src1a, src2a);
    FloatRegister dstb = dsta->successor()->successor();
    FloatRegister src1b = src1a->successor()->successor();
    FloatRegister src2b = src2a->successor()->successor();
    __ add_double(dstb, src1b, src2b);
  %}

  ins_pipe(faddF_reg_reg); // FIXME
%}
13185 #endif
13186 
13187 
13188 // Bytes vector sub
// Subtract packed 8 bytes: VSUB.I8 on a 64-bit D register (quad = false).
instruct vsub8B_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  size(4);
  format %{ "VSUB.I8 $dst,$src1,$src2\t! sub packed8B" %}
  ins_encode %{
    bool quad = false;
    __ vsubI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_8, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13201 
// Subtract packed 16 bytes: VSUB.I8 on a 128-bit Q register (quad = true).
instruct vsub16B_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  size(4);
  format %{ "VSUB.I8 $dst.Q,$src1.Q,$src2.Q\t! sub packed16B" %}
  ins_encode %{
    bool quad = true;
    __ vsubI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_8, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13214 
13215 // Shorts/Chars vector sub
// Subtract packed 4 shorts/chars: VSUB.I16 on a 64-bit D register.
instruct vsub4S_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  size(4);
  format %{ "VSUB.I16 $dst,$src1,$src2\t! sub packed4S" %}
  ins_encode %{
    bool quad = false;
    __ vsubI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_16, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13228 
// Subtract packed 8 shorts/chars: VSUB.I16 on a 128-bit Q register.
// NOTE(review): the name says "16S" but predicate and format handle 8 shorts
// (packed8S) — the rule is correct, only the name is misleading (the add
// counterpart is vadd8S_reg). Renaming is left alone here since other rules
// may reference this instruct by name outside this view.
instruct vsub16S_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  size(4);
  format %{ "VSUB.I16 $dst.Q,$src1.Q,$src2.Q\t! sub packed8S" %}
  ins_encode %{
    bool quad = true;
    __ vsubI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_16, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13241 
13242 // Integers vector sub
// Subtract packed 2 ints: VSUB.I32 on a 64-bit D register.
instruct vsub2I_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  size(4);
  format %{ "VSUB.I32 $dst,$src1,$src2\t! sub packed2I" %}
  ins_encode %{
    bool quad = false;
    __ vsubI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_32, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13255 
// Subtract packed 4 ints: VSUB.I32 on a 128-bit Q register.
instruct vsub4I_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  size(4);
  format %{ "VSUB.I32 $dst.Q,$src1.Q,$src2.Q\t! sub packed4I" %}
  ins_encode %{
    bool quad = true;
    __ vsubI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_32, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13268 
13269 // Longs vector sub
// Subtract packed 2 longs: VSUB.I64 on a 128-bit Q register.
instruct vsub2L_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  size(4);
  format %{ "VSUB.I64 $dst.Q,$src1.Q,$src2.Q\t! sub packed2L" %}
  ins_encode %{
    bool quad = true;
    __ vsubI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_64, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13282 
13283 // Floats vector sub
// Subtract packed 2 floats via NEON; requires IEEE-compliant SIMD math
// (otherwise the VFP rule below matches instead).
instruct vsub2F_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2 && VM_Version::simd_math_is_compliant());
  match(Set dst (SubVF src1 src2));
  size(4);
  format %{ "VSUB.F32 $dst,$src1,$src2\t! sub packed2F" %}
  ins_encode %{
    bool quad = false;
    __ vsubF($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VFA_SIZE_F32, quad);
  %}
  ins_pipe( faddF_reg_reg ); // FIXME
%}
13296 
13297 #ifndef AARCH64
// VFP scalar fallback for 2-float subtract: two FSUBS over successive
// single-precision lanes of the D register.
instruct vsub2F_reg_vfp(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2 && !VM_Version::simd_math_is_compliant());
  match(Set dst (SubVF src1 src2));
  size(4*2);
  ins_cost(DEFAULT_COST*2); // FIXME

  format %{ "FSUBS  $dst.a,$src1.a,$src2.a\n\t"
            "FSUBS  $dst.b,$src1.b,$src2.b" %}

  ins_encode %{
    FloatRegister dsta = $dst$$FloatRegister;
    FloatRegister src1a = $src1$$FloatRegister;
    FloatRegister src2a = $src2$$FloatRegister;
    __ sub_float(dsta, src1a, src2a);
    FloatRegister dstb = dsta->successor();
    FloatRegister src1b = src1a->successor();
    FloatRegister src2b = src2a->successor();
    __ sub_float(dstb, src1b, src2b);
  %}

  ins_pipe(faddF_reg_reg); // FIXME
%}
13320 #endif
13321 
13322 
// Subtract packed 4 floats via NEON on a Q register; IEEE-compliant SIMD only.
instruct vsub4F_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 4 && VM_Version::simd_math_is_compliant());
  match(Set dst (SubVF src1 src2));
  size(4);
  format %{ "VSUB.F32 $dst.Q,$src1.Q,$src2.Q\t! sub packed4F" %}
  ins_encode %{
    bool quad = true;
    __ vsubF($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VFA_SIZE_F32, quad);
  %}
  ins_pipe( faddF_reg_reg ); // FIXME
%}
13335 
13336 #ifdef AARCH64
// Subtract packed 2 doubles via SIMD (AArch64-only branch of this #ifdef).
instruct vsub2D_reg_simd(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 2 && VM_Version::simd_math_is_compliant());
  match(Set dst (SubVD src1 src2));
  size(4);
  // Fixed copy-paste in the disassembly comment: this rule is a subtract
  // (VSUB/SubVD), so the trailing annotation must read "sub", not "add".
  format %{ "VSUB.F64 $dst.Q,$src1.Q,$src2.Q\t! sub packed2D" %}
  ins_encode %{
    bool quad = true;
    __ vsubF($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VFA_SIZE_F64, quad);
  %}
  ins_pipe( faddD_reg_reg ); // FIXME
%}
13349 #else
// VFP scalar fallback for 4-float subtract: four FSUBS over the Q register's
// four successive single-precision lanes (a..d).
instruct vsub4F_reg_vfp(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 4 && !VM_Version::simd_math_is_compliant());
  match(Set dst (SubVF src1 src2));
  size(4*4);
  ins_cost(DEFAULT_COST*4); // FIXME

  format %{ "FSUBS  $dst.a,$src1.a,$src2.a\n\t"
            "FSUBS  $dst.b,$src1.b,$src2.b\n\t"
            "FSUBS  $dst.c,$src1.c,$src2.c\n\t"
            "FSUBS  $dst.d,$src1.d,$src2.d" %}

  ins_encode %{
    FloatRegister dsta = $dst$$FloatRegister;
    FloatRegister src1a = $src1$$FloatRegister;
    FloatRegister src2a = $src2$$FloatRegister;
    __ sub_float(dsta, src1a, src2a);
    FloatRegister dstb = dsta->successor();
    FloatRegister src1b = src1a->successor();
    FloatRegister src2b = src2a->successor();
    __ sub_float(dstb, src1b, src2b);
    FloatRegister dstc = dstb->successor();
    FloatRegister src1c = src1b->successor();
    FloatRegister src2c = src2b->successor();
    __ sub_float(dstc, src1c, src2c);
    FloatRegister dstd = dstc->successor();
    FloatRegister src1d = src1c->successor();
    FloatRegister src2d = src2c->successor();
    __ sub_float(dstd, src1d, src2d);
  %}

  ins_pipe(faddF_reg_reg); // FIXME
%}
13382 
// VFP fallback for 2-double subtract: two FSUBD, advancing two S-register
// successors per lane to reach the next double half of the Q register.
instruct vsub2D_reg_vfp(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  size(4*2);
  ins_cost(DEFAULT_COST*2); // FIXME

  format %{ "FSUBD  $dst.a,$src1.a,$src2.a\n\t"
            "FSUBD  $dst.b,$src1.b,$src2.b" %}

  ins_encode %{
    FloatRegister dsta = $dst$$FloatRegister;
    FloatRegister src1a = $src1$$FloatRegister;
    FloatRegister src2a = $src2$$FloatRegister;
    __ sub_double(dsta, src1a, src2a);
    FloatRegister dstb = dsta->successor()->successor();
    FloatRegister src1b = src1a->successor()->successor();
    FloatRegister src2b = src2a->successor()->successor();
    __ sub_double(dstb, src1b, src2b);
  %}

  ins_pipe(faddF_reg_reg); // FIXME
%}
13405 #endif
13406 
13407 // Shorts/Chars vector mul
// Multiply packed 4 shorts/chars: VMUL.I16 on a 64-bit D register.
instruct vmul4S_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  size(4);
  format %{ "VMUL.I16 $dst,$src1,$src2\t! mul packed4S" %}
  ins_encode %{
    // Named quad flag (D-register form), matching the add/sub rules' style.
    bool quad = false;
    __ vmulI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_16, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13419 
// Multiply packed 8 shorts/chars: VMUL.I16 on a 128-bit Q register.
instruct vmul8S_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  size(4);
  format %{ "VMUL.I16 $dst.Q,$src1.Q,$src2.Q\t! mul packed8S" %}
  ins_encode %{
    // Named quad flag (Q-register form), matching the add/sub rules' style.
    bool quad = true;
    __ vmulI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_16, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13431 
13432 // Integers vector mul
// Multiply packed 2 ints: VMUL.I32 on a 64-bit D register.
instruct vmul2I_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  size(4);
  format %{ "VMUL.I32 $dst,$src1,$src2\t! mul packed2I" %}
  ins_encode %{
    // Named quad flag (D-register form), matching the add/sub rules' style.
    bool quad = false;
    __ vmulI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_32, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13444 
// Multiply packed 4 ints: VMUL.I32 on a 128-bit Q register.
instruct vmul4I_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  size(4);
  format %{ "VMUL.I32 $dst.Q,$src1.Q,$src2.Q\t! mul packed4I" %}
  ins_encode %{
    // Named quad flag (Q-register form), matching the add/sub rules' style.
    bool quad = true;
    __ vmulI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VELEM_SIZE_32, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13456 
13457 // Floats vector mul
// Multiply packed 2 floats via NEON; IEEE-compliant SIMD only.
instruct vmul2F_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2 && VM_Version::simd_math_is_compliant());
  match(Set dst (MulVF src1 src2));
  size(4);
  format %{ "VMUL.F32 $dst,$src1,$src2\t! mul packed2F" %}
  ins_encode %{
    // Named quad flag (D-register form), matching the add/sub rules' style.
    bool quad = false;
    __ vmulF($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VFA_SIZE_F32, quad);
  %}
  ins_pipe( fmulF_reg_reg ); // FIXME
%}
13469 
13470 #ifndef AARCH64
// VFP scalar fallback for 2-float multiply: two FMULS over successive
// single-precision lanes of the D register.
instruct vmul2F_reg_vfp(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2 && !VM_Version::simd_math_is_compliant());
  match(Set dst (MulVF src1 src2));
  size(4*2);
  ins_cost(DEFAULT_COST*2); // FIXME

  format %{ "FMULS  $dst.a,$src1.a,$src2.a\n\t"
            "FMULS  $dst.b,$src1.b,$src2.b" %}
  ins_encode %{
    __ mul_float($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
    __ mul_float($dst$$FloatRegister->successor(),
             $src1$$FloatRegister->successor(),
             $src2$$FloatRegister->successor());
  %}

  ins_pipe(fmulF_reg_reg); // FIXME
%}
13488 #endif
13489 
// Multiply packed 4 floats via NEON on a Q register; IEEE-compliant SIMD only.
instruct vmul4F_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 4 && VM_Version::simd_math_is_compliant());
  match(Set dst (MulVF src1 src2));
  size(4);
  format %{ "VMUL.F32 $dst.Q,$src1.Q,$src2.Q\t! mul packed4F" %}
  ins_encode %{
    // Named quad flag (Q-register form), matching the add/sub rules' style.
    bool quad = true;
    __ vmulF($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VFA_SIZE_F32, quad);
  %}
  ins_pipe( fmulF_reg_reg ); // FIXME
%}
13501 
13502 #ifndef AARCH64
// VFP scalar fallback for 4-float multiply: four FMULS over the Q register's
// four successive single-precision lanes (a..d).
instruct vmul4F_reg_vfp(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 4 && !VM_Version::simd_math_is_compliant());
  match(Set dst (MulVF src1 src2));
  size(4*4);
  ins_cost(DEFAULT_COST*4); // FIXME

  format %{ "FMULS  $dst.a,$src1.a,$src2.a\n\t"
            "FMULS  $dst.b,$src1.b,$src2.b\n\t"
            "FMULS  $dst.c,$src1.c,$src2.c\n\t"
            "FMULS  $dst.d,$src1.d,$src2.d" %}

  ins_encode %{
    FloatRegister dsta = $dst$$FloatRegister;
    FloatRegister src1a = $src1$$FloatRegister;
    FloatRegister src2a = $src2$$FloatRegister;
    __ mul_float(dsta, src1a, src2a);
    FloatRegister dstb = dsta->successor();
    FloatRegister src1b = src1a->successor();
    FloatRegister src2b = src2a->successor();
    __ mul_float(dstb, src1b, src2b);
    FloatRegister dstc = dstb->successor();
    FloatRegister src1c = src1b->successor();
    FloatRegister src2c = src2b->successor();
    __ mul_float(dstc, src1c, src2c);
    FloatRegister dstd = dstc->successor();
    FloatRegister src1d = src1c->successor();
    FloatRegister src2d = src2c->successor();
    __ mul_float(dstd, src1d, src2d);
  %}

  ins_pipe(fmulF_reg_reg); // FIXME
%}
13535 #endif
13536 
13537 #ifdef AARCH64
// Multiply packed 2 doubles via SIMD (AArch64-only branch of this #ifdef).
instruct vmul2D_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 2 && VM_Version::has_simd());
  match(Set dst (MulVD src1 src2));
  size(4*1);
  ins_cost(DEFAULT_COST*1); // FIXME

  format %{ "FMUL.2D $dst,$src1,$src2\t! double[2]" %}
  ins_encode %{
    int quad = 1;
    __ vmulF($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VFA_SIZE_F64, quad);
  %}

  ins_pipe(fdivF_reg_reg); // FIXME
%}
13553 #else
// VFP fallback for 2-double multiply: two FMULD, advancing two S-register
// successors per lane to reach the next double half of the Q register.
instruct vmul2D_reg_vfp(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  size(4*2);
  ins_cost(DEFAULT_COST*2); // FIXME

  format %{ "FMULD  $dst.D.a,$src1.D.a,$src2.D.a\n\t"
            "FMULD  $dst.D.b,$src1.D.b,$src2.D.b" %}
  ins_encode %{
    FloatRegister dsta = $dst$$FloatRegister;
    FloatRegister src1a = $src1$$FloatRegister;
    FloatRegister src2a = $src2$$FloatRegister;
    __ mul_double(dsta, src1a, src2a);
    FloatRegister dstb = dsta->successor()->successor();
    FloatRegister src1b = src1a->successor()->successor();
    FloatRegister src2b = src2a->successor()->successor();
    __ mul_double(dstb, src1b, src2b);
  %}

  ins_pipe(fmulD_reg_reg); // FIXME
%}
13575 #endif
13576 
13577 
13578 // Floats vector div
// Divide packed 2 floats. The attributes/encode are split by #ifdef inside
// the instruct body: a single FDIV.2S on AArch64, two scalar FDIVS on ARM32.
instruct vdiv2F_reg_vfp(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
#ifdef AARCH64
  size(4*1);
  ins_cost(DEFAULT_COST*1); // FIXME

  format %{ "FDIV.2S $dst,$src1,$src2\t! float[2]" %}
  ins_encode %{
    int quad = 0;
    __ vdivF($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VFA_SIZE_F32, quad);
  %}

  ins_pipe(fdivF_reg_reg); // FIXME
#else
  size(4*2);
  ins_cost(DEFAULT_COST*2); // FIXME

  format %{ "FDIVS  $dst.a,$src1.a,$src2.a\n\t"
            "FDIVS  $dst.b,$src1.b,$src2.b" %}
  ins_encode %{
    __ div_float($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
    __ div_float($dst$$FloatRegister->successor(),
             $src1$$FloatRegister->successor(),
             $src2$$FloatRegister->successor());
  %}

  ins_pipe(fdivF_reg_reg); // FIXME
#endif
%}
13610 
// Divide packed 4 floats. #ifdef-split body: one FDIV.4S on AArch64, four
// scalar FDIVS over successive single-precision lanes (a..d) on ARM32.
instruct vdiv4F_reg_vfp(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
#ifdef AARCH64
  size(4*1);
  ins_cost(DEFAULT_COST*1); // FIXME

  format %{ "FDIV.4S $dst,$src1,$src2\t! float[4]" %}
  ins_encode %{
    int quad = 1;
    __ vdivF($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VFA_SIZE_F32, quad);
  %}

  ins_pipe(fdivF_reg_reg); // FIXME
#else
  size(4*4);
  ins_cost(DEFAULT_COST*4); // FIXME

  format %{ "FDIVS  $dst.a,$src1.a,$src2.a\n\t"
            "FDIVS  $dst.b,$src1.b,$src2.b\n\t"
            "FDIVS  $dst.c,$src1.c,$src2.c\n\t"
            "FDIVS  $dst.d,$src1.d,$src2.d" %}

  ins_encode %{
    FloatRegister dsta = $dst$$FloatRegister;
    FloatRegister src1a = $src1$$FloatRegister;
    FloatRegister src2a = $src2$$FloatRegister;
    __ div_float(dsta, src1a, src2a);
    FloatRegister dstb = dsta->successor();
    FloatRegister src1b = src1a->successor();
    FloatRegister src2b = src2a->successor();
    __ div_float(dstb, src1b, src2b);
    FloatRegister dstc = dstb->successor();
    FloatRegister src1c = src1b->successor();
    FloatRegister src2c = src2b->successor();
    __ div_float(dstc, src1c, src2c);
    FloatRegister dstd = dstc->successor();
    FloatRegister src1d = src1c->successor();
    FloatRegister src2d = src2c->successor();
    __ div_float(dstd, src1d, src2d);
  %}

  ins_pipe(fdivF_reg_reg); // FIXME
#endif
%}
13657 
13658 #ifdef AARCH64
// Divide packed 2 doubles via SIMD (AArch64-only branch of this #ifdef).
instruct vdiv2D_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 2 && VM_Version::has_simd());
  match(Set dst (DivVD src1 src2));
  size(4*1);
  ins_cost(DEFAULT_COST*1); // FIXME

  format %{ "FDIV.2D $dst,$src1,$src2\t! double[2]" %}
  ins_encode %{
    int quad = 1;
    __ vdivF($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             MacroAssembler::VFA_SIZE_F64, quad);
  %}

  ins_pipe(fdivF_reg_reg); // FIXME
%}
13674 #else
// VFP fallback for 2-double divide: two FDIVD, advancing two S-register
// successors per lane to reach the next double half of the Q register.
instruct vdiv2D_reg_vfp(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  size(4*2);
  ins_cost(DEFAULT_COST*2); // FIXME

  format %{ "FDIVD  $dst.D.a,$src1.D.a,$src2.D.a\n\t"
            "FDIVD  $dst.D.b,$src1.D.b,$src2.D.b" %}
  ins_encode %{
    FloatRegister dsta = $dst$$FloatRegister;
    FloatRegister src1a = $src1$$FloatRegister;
    FloatRegister src2a = $src2$$FloatRegister;
    __ div_double(dsta, src1a, src2a);
    FloatRegister dstb = dsta->successor()->successor();
    FloatRegister src1b = src1a->successor()->successor();
    FloatRegister src2b = src2a->successor()->successor();
    __ div_double(dstb, src1b, src2b);
  %}

  ins_pipe(fdivD_reg_reg); // FIXME
%}
13696 #endif
13697 
13698 // --------------------------------- NEG --------------------------------------
13699 
// Negate packed 8 bytes (VNEG.S8, D form). Has no match rule — it is only
// reachable via expand rules (see the shift-count rules below).
instruct vneg8B_reg(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  effect(DEF dst, USE src);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{ "VNEG.S8 $dst.D,$src.D\t! neg packed8B" %}
  ins_encode %{
    bool quad = false;
    __ vnegI($dst$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_8, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13713 
// Negate packed 16 bytes (VNEG.S8, Q form). Has no match rule — only
// reachable via expand rules. Removed the unused local 'bool _float = false;'
// (dead copy-paste leftover: vnegI takes no such flag and the sibling
// vneg8B_reg rule has no such local).
instruct vneg16B_reg(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  effect(DEF dst, USE src);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{ "VNEG.S8 $dst.Q,$src.Q\t! neg0 packed16B" %}
  ins_encode %{
    bool quad = true;
    __ vnegI($dst$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_8, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13728 
13729 // ------------------------------ Shift ---------------------------------------
13730 
// Materialize a left-shift count vector in a D register by replicating the
// count byte (expands to Repl8B_reg_simd).
instruct vslcntD(vecD dst, iRegI cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 8 && VM_Version::has_simd());
  match(Set dst (LShiftCntV cnt));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  expand %{
    Repl8B_reg_simd(dst, cnt);
  %}
%}
13740 
// Materialize a left-shift count vector in a Q register by replicating the
// count byte (expands to Repl16B_reg).
instruct vslcntX(vecX dst, iRegI cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 16 && VM_Version::has_simd());
  match(Set dst (LShiftCntV cnt));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  expand %{
    Repl16B_reg(dst, cnt);
  %}
%}
13750 
13751 // Low bits of vector "shift" elements are used, so it
13752 // doesn't matter if we treat it as ints or bytes here.
// Materialize a right-shift count vector (D form): duplicate the count into
// every byte lane, then negate — a negative VSHL count shifts right.
instruct vsrcntD(vecD dst, iRegI cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 8 && VM_Version::has_simd());
  match(Set dst (RShiftCntV cnt));
  size(4*2);
  ins_cost(DEFAULT_COST*2); // FIXME

  format %{ "VDUP.8 $dst.D,$cnt\n\t"
            "VNEG.S8 $dst.D,$dst.D\t! neg packed8B" %}
  ins_encode %{
    bool quad = false;
    __ vdupI($dst$$FloatRegister, $cnt$$Register,
             MacroAssembler::VELEM_SIZE_8, quad);
    __ vnegI($dst$$FloatRegister, $dst$$FloatRegister,
              MacroAssembler::VELEM_SIZE_8, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13770 
// Materialize a right-shift count vector (Q form): duplicate then negate,
// so VSHL with this count shifts right.
instruct vsrcntX(vecX dst, iRegI cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 16 && VM_Version::has_simd());
  match(Set dst (RShiftCntV cnt));
  size(4*2);
  ins_cost(DEFAULT_COST*2); // FIXME
  format %{ "VDUP.8 $dst.Q,$cnt\n\t"
            "VNEG.S8 $dst.Q,$dst.Q\t! neg packed16B" %}
  ins_encode %{
    bool quad = true;
    __ vdupI($dst$$FloatRegister, $cnt$$Register,
             MacroAssembler::VELEM_SIZE_8, quad);
    __ vnegI($dst$$FloatRegister, $dst$$FloatRegister,
              MacroAssembler::VELEM_SIZE_8, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13787 
13788 // Byte vector logical left/right shift based on sign
// Variable shift of packed 8 bytes: VSHL.U8 with per-lane counts from $shift
// (negative count = right shift). No match rule — used via expand only.
// Note vshlUI takes (dst, shift, src) — shift comes second.
instruct vsh8B_reg(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 8);
  effect(DEF dst, USE src, USE shift);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.U8 $dst.D,$src.D,$shift.D\t! logical left/right shift packed8B"
  %}
  ins_encode %{
    bool quad = false;
    __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_8, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13804 
// Variable shift of packed 16 bytes (Q form of vsh8B_reg).
instruct vsh16B_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  effect(DEF dst, USE src, USE shift);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.U8 $dst.Q,$src.Q,$shift.Q\t! logical left/right shift packed16B"
  %}
  ins_encode %{
    bool quad = true;
    __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_8, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13820 
13821 // Shorts/Char vector logical left/right shift based on sign
// Variable shift of packed 4 shorts/chars: VSHL.U16, D form (expand-only).
instruct vsh4S_reg(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 4);
  effect(DEF dst, USE src, USE shift);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.U16 $dst.D,$src.D,$shift.D\t! logical left/right shift packed4S"
  %}
  ins_encode %{
    bool quad = false;
    __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_16, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13837 
// Variable shift of packed 8 shorts/chars: VSHL.U16, Q form (expand-only).
instruct vsh8S_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  effect(DEF dst, USE src, USE shift);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.U16 $dst.Q,$src.Q,$shift.Q\t! logical left/right shift packed8S"
  %}
  ins_encode %{
    bool quad = true;
    __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_16, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13853 
13854 // Integers vector logical left/right shift based on sign
// Variable shift of packed 2 ints: VSHL.U32, D form (expand-only).
instruct vsh2I_reg(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2);
  effect(DEF dst, USE src, USE shift);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.U32 $dst.D,$src.D,$shift.D\t! logical left/right shift packed2I"
  %}
  ins_encode %{
    bool quad = false;
    __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_32, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13870 
// Variable shift of packed 4 ints: VSHL.U32, Q form (expand-only).
instruct vsh4I_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  effect(DEF dst, USE src, USE shift);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.U32 $dst.Q,$src.Q,$shift.Q\t! logical left/right shift packed4I"
  %}
  ins_encode %{
    bool quad = true;
    __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_32, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13886 
13887 // Longs vector logical left/right shift based on sign
// Variable shift of packed 2 longs: VSHL.U64, Q form (expand-only).
instruct vsh2L_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  effect(DEF dst, USE src, USE shift);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.U64 $dst.Q,$src.Q,$shift.Q\t! logical left/right shift packed2L"
  %}
  ins_encode %{
    bool quad = true;
    __ vshlUI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_64, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13903 
13904 // ------------------------------ LeftShift -----------------------------------
13905 
13906 // Byte vector left shift
// Left shift of 8 bytes by a per-lane register count; delegates to the
// match-less building block vsh8B_reg via expand.
instruct vsl8B_reg(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  size(4*1);
  ins_cost(DEFAULT_COST*1); // FIXME
  expand %{
    vsh8B_reg(dst, src, shift);
  %}
%}
13916 
// Left shift of 16 bytes by a per-lane register count; delegates to
// vsh16B_reg via expand.
instruct vsl16B_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  size(4*1);
  ins_cost(DEFAULT_COST*1); // FIXME
  expand %{
    vsh16B_reg(dst, src, shift);
  %}
%}
13926 
// Left shift of 8 bytes by an immediate count (VSHL immediate form).
instruct vsl8B_immI(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.I8 $dst.D,$src.D,$shift\t! logical left shift packed8B"
  %}
  ins_encode %{
    bool quad = false; // D (64-bit) register form
    __ vshli($dst$$FloatRegister, $src$$FloatRegister, 8, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13942 
// Left shift of 16 bytes by an immediate count (VSHL immediate form, quad).
instruct vsl16B_immI(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.I8 $dst.Q,$src.Q,$shift\t! logical left shift packed16B"
  %}
  ins_encode %{
    bool quad = true; // Q (128-bit) register form
    __ vshli($dst$$FloatRegister, $src$$FloatRegister, 8, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13958 
13959 // Shorts/Chars vector logical left/right shift
// Left or unsigned-right shift of 4 shorts by a per-lane register count;
// both node kinds map onto the same VSHL-based building block (the sign of
// the per-lane count selects the direction).
instruct vsl4S_reg(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  match(Set dst (URShiftVS src shift));
  size(4*1);
  ins_cost(DEFAULT_COST*1); // FIXME
  expand %{
    vsh4S_reg(dst, src, shift);
  %}
%}
13970 
// Left or unsigned-right shift of 8 shorts by a per-lane register count
// (quad form of vsl4S_reg).
instruct vsl8S_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  match(Set dst (URShiftVS src shift));
  size(4*1);
  ins_cost(DEFAULT_COST*1); // FIXME
  expand %{
    vsh8S_reg(dst, src, shift);
  %}
%}
13981 
// Left shift of 4 shorts by an immediate count.
instruct vsl4S_immI(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.I16 $dst.D,$src.D,$shift\t! logical left shift packed4S"
  %}
  ins_encode %{
    bool quad = false; // D (64-bit) register form
    __ vshli($dst$$FloatRegister, $src$$FloatRegister, 16, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
13997 
// Left shift of 8 shorts by an immediate count (quad).
instruct vsl8S_immI(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.I16 $dst.Q,$src.Q,$shift\t! logical left shift packed8S"
  %}
  ins_encode %{
    bool quad = true; // Q (128-bit) register form
    __ vshli($dst$$FloatRegister, $src$$FloatRegister, 16, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14013 
14014 // Integers vector logical left/right shift
// Left or unsigned-right shift of 2 ints by a per-lane register count.
// Guarded by has_simd() unlike the byte/short forms -- presumably because
// int-vector shifts need full NEON support; TODO confirm against VM_Version.
instruct vsl2I_reg(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2 && VM_Version::has_simd());
  match(Set dst (LShiftVI src shift));
  match(Set dst (URShiftVI src shift));
  size(4*1);
  ins_cost(DEFAULT_COST*1); // FIXME
  expand %{
    vsh2I_reg(dst, src, shift);
  %}
%}
14025 
// Left or unsigned-right shift of 4 ints by a per-lane register count (quad).
instruct vsl4I_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 && VM_Version::has_simd());
  match(Set dst (LShiftVI src shift));
  match(Set dst (URShiftVI src shift));
  size(4*1);
  ins_cost(DEFAULT_COST*1); // FIXME
  expand %{
    vsh4I_reg(dst, src, shift);
  %}
%}
14036 
// Left shift of 2 ints by an immediate count.
instruct vsl2I_immI(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 && VM_Version::has_simd());
  match(Set dst (LShiftVI src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.I32 $dst.D,$src.D,$shift\t! logical left shift packed2I"
  %}
  ins_encode %{
    bool quad = false; // D (64-bit) register form
    __ vshli($dst$$FloatRegister, $src$$FloatRegister, 32, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14052 
// Left shift of 4 ints by an immediate count (quad).
instruct vsl4I_immI(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 && VM_Version::has_simd());
  match(Set dst (LShiftVI src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.I32 $dst.Q,$src.Q,$shift\t! logical left shift packed4I"
  %}
  ins_encode %{
    bool quad = true; // Q (128-bit) register form
    __ vshli($dst$$FloatRegister, $src$$FloatRegister, 32, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14068 
14069 // Longs vector logical left/right shift
// Left or unsigned-right shift of 2 longs by a per-lane register count.
instruct vsl2L_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  match(Set dst (URShiftVL src shift));
  size(4*1);
  ins_cost(DEFAULT_COST*1); // FIXME
  expand %{
    vsh2L_reg(dst, src, shift);
  %}
%}
14080 
// Left shift of 2 longs by an immediate count.
instruct vsl2L_immI(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.I64 $dst.Q,$src.Q,$shift\t! logical left shift packed2L"
  %}
  ins_encode %{
    bool quad = true; // Q (128-bit) register form
    __ vshli($dst$$FloatRegister, $src$$FloatRegister, 64, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14096 
14097 // ----------------------- LogicalRightShift -----------------------------------
14098 
14099 // Bytes/Shorts vector logical right shift produces incorrect Java result
14100 // for negative data because java code convert short value into int with
14101 // sign extension before a shift.
14102 
14103 // Chars vector logical right shift
// Unsigned (logical) right shift of 4 shorts by an immediate count.
// Unsigned vshrUI matches the URShiftVS zero-extension semantics.
instruct vsrl4S_immI(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHR.U16 $dst.D,$src.D,$shift\t! logical right shift packed4S"
  %}
  ins_encode %{
    bool quad = false; // D (64-bit) register form
    __ vshrUI($dst$$FloatRegister, $src$$FloatRegister, 16, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14119 
// Unsigned (logical) right shift of 8 shorts by an immediate count (quad).
instruct vsrl8S_immI(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHR.U16 $dst.Q,$src.Q,$shift\t! logical right shift packed8S"
  %}
  ins_encode %{
    bool quad = true; // Q (128-bit) register form
    __ vshrUI($dst$$FloatRegister, $src$$FloatRegister, 16, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14135 
14136 // Integers vector logical right shift
// Unsigned (logical) right shift of 2 ints by an immediate count.
instruct vsrl2I_immI(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 && VM_Version::has_simd());
  match(Set dst (URShiftVI src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHR.U32 $dst.D,$src.D,$shift\t! logical right shift packed2I"
  %}
  ins_encode %{
    bool quad = false; // D (64-bit) register form
    __ vshrUI($dst$$FloatRegister, $src$$FloatRegister, 32, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14152 
// Unsigned (logical) right shift of 4 ints by an immediate count (quad).
instruct vsrl4I_immI(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 && VM_Version::has_simd());
  match(Set dst (URShiftVI src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHR.U32 $dst.Q,$src.Q,$shift\t! logical right shift packed4I"
  %}
  ins_encode %{
    bool quad = true; // Q (128-bit) register form
    __ vshrUI($dst$$FloatRegister, $src$$FloatRegister, 32, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14168 
14169 // Longs vector logical right shift
// Unsigned (logical) right shift of 2 longs by an immediate count.
instruct vsrl2L_immI(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHR.U64 $dst.Q,$src.Q,$shift\t! logical right shift packed2L"
  %}
  ins_encode %{
    bool quad = true; // Q (128-bit) register form
    __ vshrUI($dst$$FloatRegister, $src$$FloatRegister, 64, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14185 
14186 // ------------------- ArithmeticRightShift -----------------------------------
14187 
14188 // Bytes vector arithmetic left/right shift based on sign
// Matcher building block (no match rule): signed per-lane variable shift of
// 8 bytes; signed vshlSI gives arithmetic behavior when the count is negative.
instruct vsha8B_reg(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 8);
  effect(DEF dst, USE src, USE shift);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.S8 $dst.D,$src.D,$shift.D\t! arithmetic right shift packed8B"
  %}
  ins_encode %{
    bool quad = false; // D (64-bit) register form
    __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_8, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14204 
// Matcher building block (no match rule): signed per-lane variable shift of
// 16 bytes (quad).
instruct vsha16B_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  effect(DEF dst, USE src, USE shift);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.S8 $dst.Q,$src.Q,$shift.Q\t! arithmetic right shift packed16B"
  %}
  ins_encode %{
    bool quad = true; // Q (128-bit) register form
    __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_8, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14220 
14221 // Shorts vector arithmetic left/right shift based on sign
// Matcher building block (no match rule): signed per-lane variable shift of
// 4 shorts.
instruct vsha4S_reg(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 4);
  effect(DEF dst, USE src, USE shift);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.S16 $dst.D,$src.D,$shift.D\t! arithmetic right shift packed4S"
  %}
  ins_encode %{
    bool quad = false; // D (64-bit) register form
    __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_16, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14237 
// Matcher building block (no match rule): signed per-lane variable shift of
// 8 shorts (quad).
instruct vsha8S_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  effect(DEF dst, USE src, USE shift);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.S16 $dst.Q,$src.Q,$shift.Q\t! arithmetic right shift packed8S"
  %}
  ins_encode %{
    bool quad = true; // Q (128-bit) register form
    __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_16, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14253 
14254 // Integers vector arithmetic left/right shift based on sign
// Matcher building block (no match rule): signed per-lane variable shift of
// 2 ints.
instruct vsha2I_reg(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2);
  effect(DEF dst, USE src, USE shift);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.S32 $dst.D,$src.D,$shift.D\t! arithmetic right shift packed2I"
  %}
  ins_encode %{
    bool quad = false; // D (64-bit) register form
    __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_32, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14270 
// Matcher building block (no match rule): signed per-lane variable shift of
// 4 ints (quad).
instruct vsha4I_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  effect(DEF dst, USE src, USE shift);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.S32 $dst.Q,$src.Q,$shift.Q\t! arithmetic right shift packed4I"
  %}
  ins_encode %{
    bool quad = true; // Q (128-bit) register form
    __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_32, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14286 
14287 // Longs vector arithmetic left/right shift based on sign
// Matcher building block (no match rule): signed per-lane variable shift of
// 2 longs.
instruct vsha2L_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  effect(DEF dst, USE src, USE shift);
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    "VSHL.S64 $dst.Q,$src.Q,$shift.Q\t! arithmetic right shift packed2L"
  %}
  ins_encode %{
    bool quad = true; // Q (128-bit) register form
    __ vshlSI($dst$$FloatRegister, $shift$$FloatRegister, $src$$FloatRegister,
              MacroAssembler::VELEM_SIZE_64, quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14303 
14304 // Byte vector arithmetic right shift
14305 
// Arithmetic right shift of 8 bytes by a per-lane register count; delegates
// to the signed building block vsha8B_reg.
instruct vsra8B_reg(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  expand %{
    vsha8B_reg(dst, src, shift);
  %}
%}
14315 
// Arithmetic right shift of 16 bytes by a per-lane register count.
// NOTE(review): the name "vsrl" suggests a logical shift, but this rule
// matches RShiftVB (arithmetic) and expands to the signed vsha16B_reg --
// the behavior is arithmetic; consider renaming to vsra16B_reg to match
// its vsra8B_reg sibling.
instruct vsrl16B_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  expand %{
    vsha16B_reg(dst, src, shift);
  %}
%}
14325 
// Arithmetic right shift of 8 bytes by an immediate count.
// NOTE(review): the name "vsrl" suggests a logical shift, but this rule
// matches RShiftVB and emits the signed vshrSI (VSHR.S8), i.e. arithmetic;
// consider renaming to vsra8B_immI (kept as-is to avoid breaking any
// by-name references elsewhere in this file).
instruct vsrl8B_immI(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    // Was "logical right shift" -- wrong: VSHR.S8 is a signed (arithmetic) shift.
    "VSHR.S8 $dst.D,$src.D,$shift\t! arithmetic right shift packed8B"
  %}
  ins_encode %{
    bool quad = false; // D (64-bit) register form
    __ vshrSI($dst$$FloatRegister, $src$$FloatRegister, 8, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14341 
// Arithmetic right shift of 16 bytes by an immediate count (quad).
// NOTE(review): misnamed "vsrl" -- matches RShiftVB and emits signed
// VSHR.S8 (arithmetic); consider renaming to vsra16B_immI.
instruct vsrl16B_immI(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    // Was "logical right shift" -- wrong: VSHR.S8 is a signed (arithmetic) shift.
    "VSHR.S8 $dst.Q,$src.Q,$shift\t! arithmetic right shift packed16B"
  %}
  ins_encode %{
    bool quad = true; // Q (128-bit) register form
    __ vshrSI($dst$$FloatRegister, $src$$FloatRegister, 8, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14357 
14358 // Shorts vector arithmetic right shift
// Arithmetic right shift of 4 shorts by a per-lane register count; delegates
// to the signed building block vsha4S_reg.
instruct vsra4S_reg(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  expand %{
    vsha4S_reg(dst, src, shift);
  %}
%}
14368 
// Arithmetic right shift of 8 shorts by a per-lane register count (quad).
instruct vsra8S_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  expand %{
    vsha8S_reg(dst, src, shift);
  %}
%}
14378 
// Arithmetic right shift of 4 shorts by an immediate count.
instruct vsra4S_immI(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    // Was "logical right shift" -- wrong: VSHR.S16 is a signed (arithmetic) shift.
    "VSHR.S16 $dst.D,$src.D,$shift\t! arithmetic right shift packed4S"
  %}
  ins_encode %{
    bool quad = false; // D (64-bit) register form
    __ vshrSI($dst$$FloatRegister, $src$$FloatRegister, 16, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14394 
// Arithmetic right shift of 8 shorts by an immediate count (quad).
instruct vsra8S_immI(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    // Was "logical right shift" -- wrong: VSHR.S16 is a signed (arithmetic) shift.
    "VSHR.S16 $dst.Q,$src.Q,$shift\t! arithmetic right shift packed8S"
  %}
  ins_encode %{
    bool quad = true; // Q (128-bit) register form
    __ vshrSI($dst$$FloatRegister, $src$$FloatRegister, 16, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14410 
14411 // Integers vector arithmetic right shift
// Arithmetic right shift of 2 ints by a per-lane register count; delegates
// to the signed building block vsha2I_reg.
instruct vsra2I_reg(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  expand %{
    vsha2I_reg(dst, src, shift);
  %}
%}
14421 
// Arithmetic right shift of 4 ints by a per-lane register count (quad).
instruct vsra4I_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  expand %{
    vsha4I_reg(dst, src, shift);
  %}
%}
14431 
// Arithmetic right shift of 2 ints by an immediate count.
instruct vsra2I_immI(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    // Was "logical right shift" -- wrong: VSHR.S32 is a signed (arithmetic) shift.
    "VSHR.S32 $dst.D,$src.D,$shift\t! arithmetic right shift packed2I"
  %}
  ins_encode %{
    bool quad = false; // D (64-bit) register form
    __ vshrSI($dst$$FloatRegister, $src$$FloatRegister, 32, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14447 
// Arithmetic right shift of 4 ints by an immediate count (quad).
instruct vsra4I_immI(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    // Was "logical right shift" -- wrong: VSHR.S32 is a signed (arithmetic) shift.
    "VSHR.S32 $dst.Q,$src.Q,$shift\t! arithmetic right shift packed4I"
  %}
  ins_encode %{
    bool quad = true; // Q (128-bit) register form
    __ vshrSI($dst$$FloatRegister, $src$$FloatRegister, 32, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14463 
14464 // Longs vector arithmetic right shift
// Arithmetic right shift of 2 longs by a per-lane register count; delegates
// to the signed building block vsha2L_reg.
instruct vsra2L_reg(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  expand %{
    vsha2L_reg(dst, src, shift);
  %}
%}
14474 
// Arithmetic right shift of 2 longs by an immediate count.
instruct vsra2L_immI(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  size(4);
  ins_cost(DEFAULT_COST); // FIXME
  format %{
    // Was "logical right shift" -- wrong: VSHR.S64 is a signed (arithmetic) shift.
    "VSHR.S64 $dst.Q,$src.Q,$shift\t! arithmetic right shift packed2L"
  %}
  ins_encode %{
    bool quad = true; // Q (128-bit) register form
    __ vshrSI($dst$$FloatRegister, $src$$FloatRegister, 64, $shift$$constant,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14490 
14491 // --------------------------------- AND --------------------------------------
14492 
// Bitwise AND of two 8-byte vectors (element type irrelevant for bit ops,
// hence the length_in_bytes predicate).
instruct vandD(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  format %{ "VAND    $dst.D,$src1.D,$src2.D\t! and vectors (8 bytes)" %}
  ins_encode %{
    bool quad = false; // D (64-bit) register form
    __ vandI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14504 
// Bitwise AND of two 16-byte vectors (quad form of vandD).
instruct vandX(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  format %{ "VAND    $dst.Q,$src1.Q,$src2.Q\t! and vectors (16 bytes)" %}
  ins_encode %{
    bool quad = true; // Q (128-bit) register form
    __ vandI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14516 
14517 // --------------------------------- OR ---------------------------------------
14518 
// Bitwise OR of two 8-byte vectors.
instruct vorD(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  // Format comment previously said "and vectors" (copy-paste from vandD).
  format %{ "VOR     $dst.D,$src1.D,$src2.D\t! or vectors (8 bytes)" %}
  ins_encode %{
    bool quad = false; // D (64-bit) register form
    __ vorI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
            quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14530 
// Bitwise OR of two 16-byte vectors (quad form of vorD).
instruct vorX(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  // Format comment previously said "and vectors" (copy-paste from vandX).
  format %{ "VOR     $dst.Q,$src1.Q,$src2.Q\t! or vectors (16 bytes)" %}
  ins_encode %{
    bool quad = true; // Q (128-bit) register form
    __ vorI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
            quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14542 
14543 // --------------------------------- XOR --------------------------------------
14544 
// Bitwise XOR of two 8-byte vectors.
instruct vxorD(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  // Format comment previously said "and vectors" (copy-paste from vandD).
  format %{ "VXOR    $dst.D,$src1.D,$src2.D\t! xor vectors (8 bytes)" %}
  ins_encode %{
    bool quad = false; // D (64-bit) register form
    __ vxorI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14556 
// Bitwise XOR of two 16-byte vectors (quad form of vxorD).
instruct vxorX(vecX dst, vecX src1, vecX src2) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  // Format comment previously said "and vectors" (copy-paste from vandX).
  format %{ "VXOR    $dst.Q,$src1.Q,$src2.Q\t! xor vectors (16 bytes)" %}
  ins_encode %{
    bool quad = true; // Q (128-bit) register form
    __ vxorI($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
             quad);
  %}
  ins_pipe( ialu_reg_reg ); // FIXME
%}
14568 
14569 
14570 //----------PEEPHOLE RULES-----------------------------------------------------
14571 // These must follow all instruction definitions as they use the names
14572 // defined in the instructions definitions.
14573 //
14574 // peepmatch ( root_instr_name [preceding_instruction]* );
14575 //
14576 // peepconstraint %{
14577 // (instruction_number.operand_name relational_op instruction_number.operand_name
14578 //  [, ...] );
14579 // // instruction numbers are zero-based using left to right order in peepmatch
14580 //
14581 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
14582 // // provide an instruction_number.operand_name for each operand that appears
14583 // // in the replacement instruction's match rule
14584 //
14585 // ---------VM FLAGS---------------------------------------------------------
14586 //
14587 // All peephole optimizations can be turned off using -XX:-OptoPeephole
14588 //
14589 // Each peephole rule is given an identifying number starting with zero and
14590 // increasing by one in the order seen by the parser.  An individual peephole
14591 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
14592 // on the command-line.
14593 //
14594 // ---------CURRENT LIMITATIONS----------------------------------------------
14595 //
14596 // Only match adjacent instructions in same basic block
14597 // Only equality constraints
14598 // Only constraints between operands, not (0.dest_reg == EAX_enc)
14599 // Only one replacement instruction
14600 //
14601 // ---------EXAMPLE----------------------------------------------------------
14602 //
14603 // // pertinent parts of existing instructions in architecture description
14604 // instruct movI(eRegI dst, eRegI src) %{
14605 //   match(Set dst (CopyI src));
14606 // %}
14607 //
14608 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
14609 //   match(Set dst (AddI dst src));
14610 //   effect(KILL cr);
14611 // %}
14612 //
14613 // // Change (inc mov) to lea
14614 // peephole %{
//   // increment preceded by register-register move
14616 //   peepmatch ( incI_eReg movI );
14617 //   // require that the destination register of the increment
14618 //   // match the destination register of the move
14619 //   peepconstraint ( 0.dst == 1.dst );
14620 //   // construct a replacement instruction that sets
14621 //   // the destination to ( move's source register + one )
14622 //   peepreplace ( incI_eReg_immI1( 0.dst 1.src 0.src ) );
14623 // %}
14624 //
14625 
14626 // // Change load of spilled value to only a spill
14627 // instruct storeI(memory mem, eRegI src) %{
14628 //   match(Set mem (StoreI mem src));
14629 // %}
14630 //
14631 // instruct loadI(eRegI dst, memory mem) %{
14632 //   match(Set dst (LoadI mem));
14633 // %}
14634 //
14635 // peephole %{
14636 //   peepmatch ( loadI storeI );
14637 //   peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
14638 //   peepreplace ( storeI( 1.mem 1.mem 1.src ) );
14639 // %}
14640 
14641 //----------SMARTSPILL RULES---------------------------------------------------
14642 // These must follow all instruction definitions as they use the names
14643 // defined in the instructions definitions.
14644 //
14645 // ARM will probably not have any of these rules due to RISC instruction set.
14646 
14647 //----------PIPELINE-----------------------------------------------------------
14648 // Rules which define the behavior of the target architectures pipeline.